// Scoring helper used by the ruleset below: returns `scoreIfHas` when the fnode's
// element has an ancestor with the given tag name, and a neutral 1 otherwise.
// (The wrapper signature is inferred from the call site hasAncestor('article', 10);
// only the body appeared in the original fragment.)
function hasAncestor(tagName, scoreIfHas) {
    return fnode => {
        const lowerTag = tagName.toLowerCase();
        for (let element = fnode.element, parent;
             (parent = element.parentNode) != null &&
             parent.nodeType === parent.ELEMENT_NODE;
             element = parent) {
            if (element.tagName.toLowerCase() === lowerTag)
                return scoreIfHas;
        }
        return 1;
    };
}
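// The ruleset below also depends on three scoring helpers that this fragment does not
// include: scoreByLength, byInverseLinkDensity, and scoreByImageSize. As a rough,
// hypothetical sketch only (not the original definition), byInverseLinkDensity can be
// written the way the second example further down scores link density, using Fathom's
// linkDensity utility: elements whose text is mostly link text score lower.
const byInverseLinkDensity = fnode => (1 - linkDensity(fnode)) * 1.5;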
const rules = ruleset(
    // Isolate the actual blog post body text. Based on Fathom's example
    // Readability rules
    rule(dom('p,li,ol,ul,code,blockquote,pre,h1,h2,h3,h4,h5,h6'),
         props(scoreByLength).type('paragraphish')),
    rule(type('paragraphish'), score(byInverseLinkDensity)),
    rule(dom('p'), score(4.5).type('paragraphish')),
    // Tweaks for this particular blog
    rule(type('paragraphish'), score(hasAncestor('article', 10))),
    rule(dom('.entry-summary p'), score(0).type('paragraphish')),
    rule(dom('figure'), props(scoreByImageSize).type('paragraphish')),
    rule(dom('.jetpack-video-wrapper'), props(() => ({
        score: 100,
        note: {length: 1},
    })).type('paragraphish')),
    // Find the best cluster of paragraph-ish nodes
    rule(
        type('paragraphish').bestCluster({
            splittingDistance: 3,
            differentDepthCost: 6.5,
            differentTagCost: 2,
            sameTagCost: 0.5,
            strideCost: 0,
        }),
        out('content').allThrough(Futils.domSort)));
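// Minimal usage sketch (not part of the original fragment; it assumes the Fathom 2.x
// API these rules are written against, where a ruleset is bound to a document with
// against() and the out('content') results are read back with get()). The function
// name extractContent is hypothetical.
function extractContent(doc) {
    const facts = rules.against(doc);            // evaluate every rule against this DOM
    const contentFnodes = facts.get('content');  // fnodes selected by the bestCluster rule
    return contentFnodes.map(fnode => fnode.element);
}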
async function ingestArticle(hatch, {title, link, date, author}) {
    let $ = await Libingester.util.fetch_html(link);
    const baseURI = Libingester.util.get_doc_base_uri($, link);
/**
* @param {fnode} fnode
* @return {any} Object containing a `score` key derived from the element's text length
*/
const scoreByLength = ({ element }) => ({
  score: inlineTextLength(element),
})
// Based on: https://hacks.mozilla.org/2017/04/fathom-a-framework-for-understanding-web-pages/
// Meant to approximate Readability-style extraction of a page's main content
// Initial tests of this are pretty inaccurate; there's a lot to learn before the rules can be tweaked and used well
const rules = ruleset(
  rule(
    dom('p,div,li,blockquote,h1,h2,h3,h4,h5,h6'),
    props(scoreByLength).type('paragraphish'),
  ),
  rule(
    type('paragraphish'),
    score(fnode => {
      const paragraphishNote = fnode.noteFor('paragraphish')
      return paragraphishNote
        ? (1 - linkDensity(fnode, paragraphishNote.inlineLength)) * 1.5
        : (1 - linkDensity(fnode)) * 1.5
    }),
  ),
  rule(dom('p'), score(4.5).type('paragraphish')),
  rule(
    type('paragraphish').bestCluster({
      splittingDistance: 3,
      differentDepthCost: 6.5,
      differentTagCost: 2,