function lookupKeyFromRegistry (url, opts, run) { // assumed declaration: the original excerpt begins mid-function
  nets({ url: url, json: true }, function (err, resp, body) {
if (err) return exit(err)
if (resp.statusCode !== 200) return exit(body.message)
try {
opts.key = stringKey(body.url)
debug('Received key from registry:', opts.key)
if (opts.key) return createDir(url.split('/').pop(), run) // dirname is name of repo
} catch (e) {
console.error(new Error(e))
}
exit('Error getting key from registry')
})
}
leave (discoveryKey) {
const dKeyStr = datEncoding.toStr(discoveryKey)
const feed = this._replicatingFeeds.get(dKeyStr)
if (!feed) return
if (feed.replicationStreams) {
feed.replicationStreams.forEach(stream => stream.destroy()) // stop all active replications
feed.replicationStreams.length = 0
}
this._swarm.leave(feed.discoveryKey)
this.emit('leave', { key: feed.key.toString('hex'), discoveryKey: dKeyStr })
}
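For context, a minimal usage sketch of the leave() method above; the swarmManager instance name and event wiring are illustrative assumptions, not part of the original module.

// Hypothetical: swarmManager is an instance of the class that defines add()/leave() above.
swarmManager.on('leave', function ({ key, discoveryKey }) {
  console.log('Stopped swarming feed', key, 'with discovery key', discoveryKey)
})
swarmManager.leave(feed.discoveryKey)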
async _computeArchiveLatestStats (archive) {
var start = Date.now()
var release = await lock('archiver-compute-stats')
const archiveKey = datEncoding.toStr(archive.key)
const {metadata, content} = archive
try {
if (!metadata || !content) {
// sanity check
console.error('Had to abort computing archive stats', {archiveKey, metadata: !!metadata, content: !!content})
return
}
// checkout the archive for consistency
var co = archive.checkout(archive.version)
// reset all stats
const st = archive.latestStats
for (var k in st) {
st[k] = 0
    }
    // (remainder of the stats computation is not included in this excerpt)
  } finally {
    release()
  }
}

swarmArchive (archive, opts) { // assumed declaration: the original excerpt begins mid-function
  var wasSwarming = archive.isSwarming
  // leave any current swarm session before reconfiguring
  if (archive.isSwarming) {
    archive.replicationStreams.forEach(stream => stream.destroy()) // stop all active replications
    archive.replicationStreams.length = 0
    archive.isSwarming = false
    this.swarm.leave(archive.discoveryKey)
  }
// done?
if (opts.download === false && opts.upload === false) {
if (wasSwarming) {
console.log(figures.info, 'Unswarming archive', datEncoding.toStr(archive.key))
}
return
}
// join the swarm
debounceConsole.log(`${figures.info} Swarming archive`, {timeout: 250, max: 1e3}, datEncoding.toStr(archive.key))
archive.isSwarming = true
archive.swarmOpts = opts
this.swarm.listen(archive.discoveryKey, 0, () => {})
}
function onNetworkChanged (archive) { // assumed declaration: the original excerpt begins mid-function
  // keep peerHistory from getting too long
if (archive.peerHistory.length >= 500) {
// downsize to 360 points, which at 10s intervals covers one hour
archive.peerHistory = archive.peerHistory.slice(archive.peerHistory.length - 360)
}
// count # of peers
var totalPeerCount = 0
for (var k in archives) {
totalPeerCount += archives[k].metadata.peers.length
}
daemonEvents.emit('network-changed', {
details: {
url: `dat://${datEncoding.toStr(archive.key)}`,
peers: getArchivePeerInfos(archive),
connections: archive.metadata.peers.length,
totalPeerCount
}
})
}
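A short, hedged sketch of consuming the 'network-changed' event emitted above; the listener body is illustrative, but the payload shape follows the emit call.

// Payload shape mirrors the emit above: { details: { url, peers, connections, totalPeerCount } }
daemonEvents.on('network-changed', function ({ details }) {
  console.log(`${details.url}: ${details.connections} connections, ${details.totalPeerCount} peers across all archives`)
})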
function matchDatLink (str) {
var match = str.match(/[A-Za-z0-9]{64}/)
if (!match) return false
var key
try {
key = encoding.toStr(match[0].trim())
} catch (e) {
return false
}
return key
}
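A quick usage sketch for matchDatLink; the link below is built from a placeholder key rather than a real archive.

// Placeholder: a syntactically valid 64-character hex key, for demonstration only.
var link = 'dat://' + 'f'.repeat(64) + '/some/path'
var key = matchDatLink(link)
if (key) console.log('Extracted dat key:', key)
else console.log('No dat key found in string')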
add (feed, opts) {
  if (!feed.key) return feed.ready(() => { this.add(feed, opts) }) // re-run with opts once the key is available
const key = datEncoding.toStr(feed.key)
const discoveryKey = datEncoding.toStr(feed.discoveryKey)
this._replicatingFeeds.set(discoveryKey, feed)
this.rejoin(feed.discoveryKey, opts)
this.emit('join', { key, discoveryKey })
feed.isSwarming = true
}
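A hedged sketch of registering a feed through add(); the hypercore storage path, the option names passed through to rejoin(), and the swarmManager instance are illustrative assumptions.

// Hypothetical setup: feed is a hypercore instance, swarmManager the class instance above.
var hypercore = require('hypercore')
var feed = hypercore('./my-feed')
swarmManager.on('join', function ({ key, discoveryKey }) {
  console.log('Now replicating', key)
})
swarmManager.add(feed, { upload: true, download: true })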
function pause (dat, cb) {
var key = encoding.toStr(dat.key)
dat.leaveNetwork()
dat.stats.emit('update')
dbPaused.write(key, true, cb)
}
function resume (dat, cb) {
var key = encoding.toStr(dat.key)
dat.joinNetwork()
dat.stats.emit('update')
dbPaused.write(key, false, cb)
}
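Finally, a hedged usage sketch of pause() and resume(); it assumes dat is a dat-node instance and that dbPaused is configured elsewhere, as in the helpers above.

// Hypothetical: toggle networking for an existing dat and persist the paused flag.
pause(dat, function (err) {
  if (err) return console.error('Failed to pause:', err)
  console.log('Paused', encoding.toStr(dat.key))
  resume(dat, function (err) {
    if (err) return console.error('Failed to resume:', err)
    console.log('Resumed', encoding.toStr(dat.key))
  })
})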