function findNotes(grade, notes, words) {
  if (grade !== 'KF') return null;
  const documents = notes.map(note => {
    return {
      id: note.id,
      text: note.text
    };
  });
  const index = lunr(function() {
    this.ref('id');
    this.field('text');
    this.metadataWhitelist = ['position'];
    documents.forEach(doc => this.add(doc));
  });
  const results = _.flatMap(words, word => index.search(word));
  const matches = _.flatMap(results, result => {
    const note = notes.find(note => note.id === parseInt(result.ref, 10));
    if (!note) {
      // no note corresponds to this search result, so skip it
      return [];
    }
    // the excerpt is cut off here; the original presumably maps each result
    // to its note, along the lines of:
    return [{ note, score: result.score }];
  });
  return matches;
}
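
// A minimal, self-contained sketch of what the 'position' metadata whitelisted
// above looks like on the search results (document and values are invented):
const lunr = require('lunr');

const idx = lunr(function () {
  this.ref('id');
  this.field('text');
  this.metadataWhitelist = ['position'];
  this.add({ id: '1', text: 'counting and number recognition practice' });
});

idx.search('counting').forEach(result => {
  // metadata is keyed by the matched (stemmed) term, then by field
  const meta = result.matchData.metadata;
  Object.keys(meta).forEach(term => {
    console.log(term, meta[term].text.position); // e.g. 'count' [ [ 0, 8 ] ]
  });
});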
async indexCiphers(): Promise<void> {
    if (this.indexing) {
        return;
    }
    // tslint:disable-next-line
    console.time('search indexing');
    this.indexing = true;
    this.index = null;
    const builder = new lunr.Builder();
    builder.ref('id');
    (builder as any).field('shortid', { boost: 100, extractor: (c: CipherView) => c.id.substr(0, 8) });
    (builder as any).field('name', { boost: 10 });
    (builder as any).field('subtitle', {
        boost: 5,
        extractor: (c: CipherView) => {
            if (c.subTitle != null && c.type === CipherType.Card) {
                return c.subTitle.replace(/\*/g, '');
            }
            return c.subTitle;
        },
    });
    builder.field('notes');
    (builder as any).field('login.username', {
        extractor: (c: CipherView) => c.type === CipherType.Login && c.login != null ? c.login.username : null,
    });
    // ... the excerpt ends here; the original method goes on to register more
    // fields, add each cipher to the builder, and build the index.
}
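
// The `as any` casts above suggest the TypeScript typings in use don't know
// about lunr's field `boost`/`extractor` attributes; a plain-JavaScript sketch
// of the same Builder pattern, with an invented document shape:
const lunr = require('lunr');

const builder = new lunr.Builder();
// the lunr() convenience function normally registers these pipeline stages
builder.pipeline.add(lunr.trimmer, lunr.stopWordFilter, lunr.stemmer);
builder.searchPipeline.add(lunr.stemmer);

builder.ref('id');
builder.field('name', { boost: 10 });
builder.field('username', {
  // derive the field value from a nested property
  extractor: (doc) => (doc.login ? doc.login.username : null),
});

[{ id: 'a1', name: 'GitHub', login: { username: 'octocat' } }]
  .forEach((doc) => builder.add(doc));

const index = builder.build();
console.log(index.search('octocat')); // matches document 'a1' via the extractor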
const ENV = process.env.NODE_ENV;
const debug = debugLib('DocsService');
const indexDb = getSearchIndexPath();

marked.setOptions({
  highlight: (code) => {
    return highlight.highlightAuto(code).value;
  }
});

// Generate a hash of valid api routes, from the /configs/apis.js file
let cache = {};
let documents = {};

// setup lunr index
const index = lunr(function () {
  debug('Creating lunr index');
  this.ref('id');
  this.field('title', { boost: 10 });
  this.field('description', { boost: 5 });
  this.field('body');
  this.field('permalink');
});
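
// A quick sketch (documents invented) of what the boosts above buy you:
// a hit on a boosted field ranks above the same term found only in the body.
const lunr = require('lunr');

const idx = lunr(function () {
  this.ref('id');
  this.field('title', { boost: 10 });
  this.field('body');
  this.add({ id: 'routing', title: 'Routing', body: 'How to configure routes.' });
  this.add({ id: 'intro', title: 'Introduction', body: 'Routing is covered later.' });
});

// both documents contain the term, but the boosted title match should rank first
console.log(idx.search('routing').map(r => r.ref));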
/**
 * Generic function to call GitHub's 'repos' API
 * https://developer.github.com/v3/repos/
 *
 * @function fetchGitHubReposApi
 * @param {Object} params
 * @param {String} [params.repo=yahoo/fluxible] Repository
 * @param {String} params.type Type of data to fetch
 */
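
// The implementation of fetchGitHubReposApi is not part of the excerpt; a
// hypothetical sketch of such a helper, with the endpoint pattern assumed from
// the GitHub v3 repos API and parameter names taken from the JSDoc above:
async function fetchGitHubReposApi({ repo = 'yahoo/fluxible', type }) {
  const res = await fetch(`https://api.github.com/repos/${repo}/${type}`);
  if (!res.ok) {
    throw new Error(`GitHub API responded with ${res.status}`);
  }
  return res.json();
}

// e.g. fetchGitHubReposApi({ type: 'tags' }).then(tags => console.log(tags.length));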
constructor(props) {
  super(props)
  // init client-side search indexing
  this._documents = {}
  this._index = lunr(function () {
    this.ref('path')
    this.field('title', {boost: 10})
    this.field('text')
    this.pipeline.remove(lunr.stopWordFilter)
  })
  this.state = {query: ''}
}
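
// The constructor above only builds an empty index, which points at the
// pre-2.x lunr API where documents can still be added after creation. The rest
// of the component might feed the index and resolve hits roughly like this
// (method names and the `hits` state field are invented):
addPage(page) {
  this._documents[page.path] = page
  this._index.add({path: page.path, title: page.title, text: page.text})
}

handleSearch(query) {
  // map lunr refs ('path') back to the stored page objects
  const hits = this._index.search(query).map(result => this._documents[result.ref])
  this.setState({query, hits})
}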
(
  patronFilter,
  serviceFilter,
  statusFilter,
  healthIssueFilter,
  selectedNeeds,
  benefits,
  needs,
  eligibilityPaths,
  searchString,
  currentLanguage,
  enIdx,
  frIdx
) => {
  // Reinitialize the indexes after they are serialized by Redux
  enIdx = lunr.Index.load(JSON.parse(enIdx));
  frIdx = lunr.Index.load(JSON.parse(frIdx));
  let selectedEligibility = {
    patronType: patronFilter,
    serviceType: serviceFilter,
    statusAndVitals: statusFilter,
    serviceHealthIssue: healthIssueFilter
  };
  let eligibilityMatch = (path, selected) => {
    let matches = true;
    [
      "serviceType",
      "patronType",
      "statusAndVitals",
      "serviceHealthIssue"
    ].forEach(criteria => {
      // the excerpt is cut off here; the original presumably flags a mismatch
      // between the selected filter and the eligibility path, roughly:
      if (selected[criteria] && path[criteria] !== selected[criteria]) {
        matches = false;
      }
    });
    return matches;
  };
  // ... (the remainder of the selector is not part of this excerpt)
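
// lunr indexes serialize cleanly to JSON, which is what makes the
// `lunr.Index.load(JSON.parse(...))` round trip above work; a small sketch:
const lunr = require('lunr');

const original = lunr(function () {
  this.ref('id');
  this.field('title');
  this.add({ id: '1', title: 'disability benefits' });
});

// serialize it (into a Redux store, a file, a network response, ...)
const serialized = JSON.stringify(original);

// ...and rehydrate it later without re-indexing the documents
const restored = lunr.Index.load(JSON.parse(serialized));
console.log(restored.search('benefits').length); // 1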
static singleTermSearch(term, index, fields) {
  if (!fields) {
    fields = index.fields;
  }
  if (term.length > 15) {
    term = term.substring(0, 14) + lunr.Query.wildcard;
  }
  return index.query(query => {
    query.term(term, {
      fields: fields,
      usePipeline: true,
      boost: 100
    });
    if (term.includes(lunr.Query.wildcard)) {
      // wildcard in term, disable stemming
      query.term(term, {
        fields: fields,
        usePipeline: false,
        // the excerpt is cut off here; the original presumably finishes this
        // clause (e.g. with a lower boost) and closes the query callback
      });
    }
  });
}
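
// Why `usePipeline: false` goes with wildcards: wildcard terms are matched
// against the tokens as they are stored in the index, so the query term should
// skip the search pipeline (stemming) rather than be rewritten first. A sketch:
const lunr = require('lunr');

const idx = lunr(function () {
  this.ref('id');
  this.field('text');
  this.add({ id: '1', text: 'searching and indexing' });
});

const hits = idx.query(q => {
  // prefix match against the indexed (already stemmed) tokens
  q.term('sear' + lunr.Query.wildcard, { usePipeline: false });
});
console.log(hits.length); // 1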
let results = index.query(function() {
  // exact matches should have the highest boost
  this.term(lunr.tokenizer(query), { boost: 100 });
  // prefix matches should be boosted slightly
  this.term(query, { boost: 10, usePipeline: false, wildcard: lunr.Query.wildcard.TRAILING });
  // finally, try a fuzzy search (edit distance 2) with a small boost
  this.term(query, { boost: 5, usePipeline: false, editDistance: 2 });
});
return results.slice(0, numValues);
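
// The same exact / prefix / fuzzy strategy can also be written with lunr's
// search-string syntax instead of the programmatic builder (illustrative only):
const lunr = require('lunr');

const idx = lunr(function () {
  this.ref('id');
  this.field('title');
  this.add({ id: '1', title: 'search basics' });
});

// boosted exact term, trailing-wildcard prefix, and a fuzzy (~2) variant
console.log(idx.search('search^100 sea*^10 serach~2').map(r => r.ref)); // [ '1' ]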
    // Fall back to basic search if the lunr index is not available
    return this.searchCiphersBasic(ciphers, query);
}

const ciphersMap = new Map();
ciphers.forEach((c) => ciphersMap.set(c.id, c));
let searchResults: lunr.Index.Result[] = null;
// a leading '>' lets the caller pass a raw lunr query string straight through
const isQueryString = query != null && query.length > 1 && query.indexOf('>') === 0;
if (isQueryString) {
    try {
        searchResults = index.search(query.substr(1).trim());
    } catch { }
} else {
    // tslint:disable-next-line
    const soWild = lunr.Query.wildcard.LEADING | lunr.Query.wildcard.TRAILING;
    searchResults = index.query((q) => {
        lunr.tokenizer(query).forEach((token) => {
            const t = token.toString();
            q.term(t, { fields: ['name'], wildcard: soWild });
            q.term(t, { fields: ['subtitle'], wildcard: soWild });
            q.term(t, { fields: ['login.uris'], wildcard: soWild });
            q.term(t, {});
        });
    });
}
if (searchResults != null) {
    searchResults.forEach((r) => {
        if (ciphersMap.has(r.ref)) {
            results.push(ciphersMap.get(r.ref));
        }
    });
}
// `results` and `index` are defined earlier in the original method, which is
// not included in this excerpt.
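
// Combining LEADING and TRAILING wildcards as above turns each token into a
// contains-style match (at the cost of scanning more of the index); a sketch:
const lunr = require('lunr');

const idx = lunr(function () {
  this.ref('id');
  this.field('name');
  this.add({ id: '1', name: 'my-github-account' });
});

const both = lunr.Query.wildcard.LEADING | lunr.Query.wildcard.TRAILING;
const hits = idx.query((q) => {
  // matches any indexed token that merely contains 'hub'
  q.term('hub', { wildcard: both, usePipeline: false });
});
console.log(hits.map((r) => r.ref)); // [ '1' ]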
function makeSnippet( doc, query ) {
  // generate a regex of the form /[^a-zA-Z](term1|term2)/ for the query "term1 term2"
  const termRegexMatchers = lunr.tokenizer( query ).map( term => escapeRegexString( term ) );
  const termRegexString = '[^a-zA-Z](' + termRegexMatchers.join( '|' ) + ')';
  const termRegex = new RegExp( termRegexString, 'gi' );
  const snippets = [];
  let match;

  // find up to 4 matches in the document and extract snippets to be joined together
  // TODO: detect when snippets overlap and merge them.
  while ( ( match = termRegex.exec( doc.body ) ) !== null && snippets.length < 4 ) {
    const matchStr = match[ 1 ],
      index = match.index + 1,
      before = doc.body.substring( index - SNIPPET_PAD_LENGTH, index ),
      after = doc.body.substring(
        index + matchStr.length,
        index + matchStr.length + SNIPPET_PAD_LENGTH
      );
    // the excerpt is cut off here; the original presumably collects the
    // padded match, roughly:
    snippets.push( before + matchStr + after );
  }
  return snippets.join( ' … ' );
}
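
// An alternative to regex-scanning the document body: when the index
// whitelists 'position' metadata (as the first snippet above does), the match
// offsets can be read straight off the search results. A sketch with an
// invented document and pad length:
const lunr = require('lunr');

const PAD = 30;
const body = 'Lunr builds a small full-text index entirely in the browser.';

const idx = lunr(function () {
  this.ref('id');
  this.field('body');
  this.metadataWhitelist = ['position'];
  this.add({ id: '1', body: body });
});

idx.search('index').forEach(result => {
  const meta = result.matchData.metadata;
  Object.keys(meta).forEach(term => {
    (meta[term].body.position || []).forEach(([start, length]) => {
      // slice a snippet around each reported [start, length] pair
      console.log(body.substring(Math.max(0, start - PAD), start + length + PAD));
    });
  });
});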