Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
// Fetch candidate records whose keyword list shares at least one token with
// the tokenized query (`cutString` — defined outside this view; presumably
// the search string split into tokens — TODO confirm).
const results = await Anime.find({
keywords: { $in: cutString },
// $or: [
// { title: /searchString/ },
// { keywords: { $in: cutString } }
// ]
});
// Score each candidate: [score, title, _id, bgmid].
const list = [];
for (const record of results) {
const cutAlias = record.keywords;
// Size of the union of the record's keywords and the query tokens; used
// below to derive the intersection size via inclusion–exclusion.
let total = [...new Set([...cutAlias, ...cutString])].length;
// Best string-similarity between the raw query and any alias of the record.
let maxDistance = 0;
for (const alias of record.alias) {
maxDistance = Math.max(maxDistance, jsimilarity(searchString, alias))
// console.log(searchString, alias, maxDistance)
}
// |A| + |B| - |A ∪ B| = |A ∩ B| (keyword overlap count), weighted by the
// best alias similarity.
list.push([maxDistance * (cutString.length + cutAlias.length - total), record.title, record._id, record.bgmid]);
}
// Pagination: 10 items per page; `page` comes from outside this view.
let ended = false;
let ending = page * 10;
// Page starts beyond the result set — return an empty, ended page.
if (list.length < (page - 1) * 10) {
ended = true;
ctx.body = { items: [], ended };
return;
// NOTE(review): this branch is truncated here — its closing brace and the
// rest of the handler are outside the visible source.
} else if (list.length <= ending) {
ended = true;
ending = list.length;
searchItems() {
const query = this.state.search;
if (!query || query.length < 3)
return this.setState({filteredItems: []});
const regex = new RegExp(escapeRegexp(query).replace(/\s/g, '\\s'), 'i');
const filteredItems = (this.props.paragraphs || [])
.filter(data => {
return regex.test(data.paragraph.text);
});
return this.setState({filteredItems: filteredItems});
}
searchItems() {
const query = this.state.search;
if (!query || query.length < 3)
return this.setState({filteredItems: []});
const regex = new RegExp(escapeRegexp(query).replace(/\s/g, '\\s'), 'i');
const filteredItems = (this.props.voc || [])
.filter(voc => {
if (regex.test(voc.title))
return true;
return voc.children.some(paragraph => {
return regex.test(paragraph.text);
});
});
return this.setState({filteredItems: filteredItems});
}
/**
 * Normalized string similarity in [0, 1]: 1 for identical strings,
 * approaching 0 as the edit distance nears the longer string's length.
 * The extra `1` inside max() prevents division by zero when both
 * strings are empty (distance 0 → similarity 1).
 *
 * @param {string} a first string
 * @param {string} b second string
 * @returns {number} 1 - distance / max(|a|, |b|, 1)
 */
function normalizedLevenshtein(a, b) {
  const longest = Math.max(a.length, b.length, 1);
  const distance = levenshtein(a, b);
  return 1 - distance / longest;
}
// Pair each target item's id with its score from `similarity(...)` against
// the current item's vector: [[bgmid, score], ...] in targetItems order.
// (Fix: dropped the unused index parameter `i` from the map callback.)
// NOTE(review): `targetItems`, `similarity`, and `selfVector` are defined
// outside this view — presumably precomputed embedding vectors; confirm.
const similarityValues = targetItems.map(
  (targetItem) => [targetItem.bgmid, similarity(targetItem.vector, selfVector)]
);
// Train a Legalipy tokenizer on the word list extracted from `text` and
// return its tokenize method, bound so it can be passed around freely.
// NOTE(review): `words` and `LegalipyTokenizer` come from elsewhere in the
// file — presumably the `natural` NLP package; confirm.
const buildTokenizer = text => {
  const tokenizer = new LegalipyTokenizer();
  tokenizer.train(words(text));
  tokenizer.finalize();
  return tokenizer.tokenize.bind(tokenizer);
};
// NOTE(review): byte-identical duplicate of the buildTokenizer defined just
// above — redeclaring the same `const` name in one module scope is a
// SyntaxError, so these two definitions most likely come from different
// source files that were concatenated here.
const buildTokenizer = text => {
  const instance = new LegalipyTokenizer();
  const tokens = words(text);
  instance.train(tokens);
  instance.finalize();
  // Bound method so the trained instance travels with the returned function.
  return instance.tokenize.bind(instance);
};