let results = index.query(function() {
  // exact matches should have the highest boost
  this.term(lunr.tokenizer(query), { boost: 100 });
  // prefix matches should be boosted slightly
  this.term(query, { boost: 10, usePipeline: false, wildcard: lunr.Query.wildcard.TRAILING });
  // finally, try a fuzzy search with an edit distance of 2 and a small boost
  this.term(query, { boost: 5, usePipeline: false, editDistance: 2 });
});
return results.slice(0, numValues);
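// The query snippets here assume a lunr index built elsewhere. A minimal sketch of
// what that might look like with lunr 2.x follows; `documents`, the ref, and the
// field names are placeholders, not taken from the code above.
const lunr = require('lunr');

const documents = [
  { id: '1', name: 'first doc', body: 'Searching documents with lunr' },
  { id: '2', name: 'second doc', body: 'Another body of text' },
];

const index = lunr(function () {
  this.ref('id');
  this.field('name');
  this.field('body');

  documents.forEach(function (doc) {
    this.add(doc);
  }, this);
});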
function makeSnippet( doc, query ) {
  // generate a regex of the form /[^a-zA-Z](term1|term2)/ for the query "term1 term2"
  const termRegexMatchers = lunr.tokenizer( query ).map( term => escapeRegexString( term ) );
  const termRegexString = '[^a-zA-Z](' + termRegexMatchers.join( '|' ) + ')';
  const termRegex = new RegExp( termRegexString, 'gi' );
  const snippets = [];
  let match;

  // find up to 4 matches in the document and extract snippets to be joined together
  // TODO: detect when snippets overlap and merge them.
  while ( ( match = termRegex.exec( doc.body ) ) !== null && snippets.length < 4 ) {
    const matchStr = match[ 1 ],
      index = match.index + 1,
      before = doc.body.substring( index - SNIPPET_PAD_LENGTH, index ),
      after = doc.body.substring(
        index + matchStr.length,
        index + matchStr.length + SNIPPET_PAD_LENGTH
      );

    // assumed completion: keep each matched term together with its surrounding context
    snippets.push( before + matchStr + after );
  }

  // assumed completion: join the collected snippets into one excerpt
  return snippets.join( ' ... ' );
}
// query each field separately for every token so per-field wildcards can be applied;
// soWild is assumed elsewhere to hold a lunr.Query.wildcard setting
searchResults = index.query((q) => {
  lunr.tokenizer(query).forEach((token) => {
    const t = token.toString();
    q.term(t, { fields: ['name'], wildcard: soWild });
    q.term(t, { fields: ['subtitle'], wildcard: soWild });
    q.term(t, { fields: ['login.uris'], wildcard: soWild });
    q.term(t, {});
  });
});
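// A sketch of consuming the results above: lunr returns { ref, score, matchData }
// entries, so the ref is looked back up in an application-side map. documentsById
// is hypothetical and not part of the original code.
const documentsById = new Map(documents.map((doc) => [doc.id, doc]));
const topHits = searchResults
  .slice(0, 10)
  .map((result) => documentsById.get(result.ref));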
// run the index's search-time pipeline (e.g. the stemmer) over the tokenised text
function getTokenStream(text, index) {
  return index.pipeline.run(lunr.tokenizer(text));
}
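// Hypothetical usage of getTokenStream: running the search-time pipeline over raw
// text yields the same stemmed tokens stored in the index, e.g. for highlighting.
const tokens = getTokenStream('Searching documents', index);
console.log(tokens.map((token) => token.toString()));
// with lunr's default search pipeline this logs something like [ 'search', 'document' ]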