export function normaliseDagPb (node, cid, type) {
if (toCidStrOrNull(node.multihash) !== cid) {
throw new Error('dag-pb multihash should match provided cid')
}
node = node.toJSON()
let format
try {
// it's a unix system?
const { type, data, blockSizes } = unixfs.unmarshal(node.data)
node.data = { type, data, blockSizes }
format = `unixfs`
} catch (err) {
// dag-pb but not a unixfs.
// console.log(err)
}
return {
cid,
type,
data: node.data,
links: normaliseDagPbLinks(node),
size: node.size,
format
}
}
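The normaliseDagPbLinks helper referenced above is not shown in this snippet. A rough sketch of such a helper, assuming each dag-pb link exposes a name, a size, and a CID or multihash (field names differ between ipld-dag-pb versions, so this is illustrative only; it reuses the toCidStrOrNull helper from the snippet above):
// hypothetical helper: flatten a dag-pb node's links into plain objects
function normaliseDagPbLinks (node) {
  return (node.links || []).map((link, index) => ({
    name: link.name || `Links/${index}`,
    size: link.size,
    cid: toCidStrOrNull(link.cid || link.multihash)
  }))
}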
ipfs.getApi().object.data(link.hash, (err, marshaled) => {
clearTimeout(timeout)
if (err) {
// TODO: what's the right error for this?
log('[IPFS] Data fetch failed', err)
return cb(500, 'Failed')
}
// parse the data
var unmarshaled = Unixfs.unmarshal(marshaled)
var data = unmarshaled.data
// try to identify the type by the buffer contents
var mimeType
var identifiedExt = identify(data)
if (identifiedExt)
mimeType = mime.lookup(identifiedExt)
if (mimeType)
log('[IPFS] Identified entry mimetype as', mimeType)
else {
// fallback to using the entry name
mimeType = mime.lookup(link.name)
if (mimeType == 'application/octet-stream')
mimeType = 'text/plain' // TODO look if content is textlike?
log('[IPFS] Assumed mimetype from link name', mimeType)
}
ipfs.getApi().object.data(indexLink._multihash || indexLink.hash, (err, marshaled) => {
if (aborted) return
cleanup()
if (err) {
ipfs.checkIfConnectionFailed(err)
return cb(500, 'Failed')
}
// parse and send the data
var unmarshaled = Unixfs.unmarshal(marshaled)
res.writeHead(200, 'OK', {
'Content-Type': 'text/html',
'Content-Security-Policy': IPFS_CSP
})
return res.end(unmarshaled.data)
})
})
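Both fetches above follow the same pattern: read the raw dag-pb Data field with object.data, then let Unixfs.unmarshal recover the actual file payload. A stripped-down version of that pattern, assuming the same callback-style API object used in the snippet:
// minimal sketch of the fetch-then-unmarshal pattern used above
function fetchUnixfsData (api, hash, cb) {
  api.object.data(hash, (err, marshaled) => {
    if (err) return cb(err)
    // marshaled is the unixfs protobuf stored in the dag-pb node's Data field
    const entry = Unixfs.unmarshal(marshaled)
    cb(null, entry.data)
  })
}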
ds.get(stats.Hash, (err, node) => {
expect(err).to.not.exist
const dirSmallNode = new DAGNode()
const buf = fs.readFileSync(dirSmall + '.block')
dirSmallNode.unMarshal(buf)
expect(node.links).to.deep.equal(dirSmallNode.links)
const nodeUnixFS = UnixFS.unmarshal(node.data)
const dirUnixFS = UnixFS.unmarshal(dirSmallNode.data)
expect(nodeUnixFS.type).to.equal(dirUnixFS.type)
expect(nodeUnixFS.fileSize()).to.equal(dirUnixFS.fileSize())
expect(nodeUnixFS.data).to.deep.equal(dirUnixFS.data)
expect(nodeUnixFS.blockSizes).to.deep.equal(dirUnixFS.blockSizes)
expect(node.data).to.deep.equal(dirSmallNode.data)
expect(node.marshal()).to.deep.equal(dirSmallNode.marshal())
done()
})
})
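The comparison between node and dirSmallNode leans on the unixfs marshal/unmarshal round trip. That round trip can be exercised in isolation; a small sketch, assuming the pre-2.x ipfs-unixfs constructor API used throughout these examples:
const UnixFS = require('ipfs-unixfs')

// build a unixfs 'file' entry, record one leaf size, then round-trip it
const file = new UnixFS('file')
file.addBlockSize(256)
const restored = UnixFS.unmarshal(file.marshal())
console.log(restored.type)        // 'file'
console.log(restored.blockSizes)  // [ 256 ]
console.log(restored.fileSize())  // 256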
ds.get(hash, (err, fetchedNode) => {
expect(err).to.not.exist
const unmarsh = UnixFS.unmarshal(fetchedNode.data)
expect(unmarsh.data).to.deep.equal(data.stream._readableState.buffer[0])
done()
})
})
children.forEach(child => {
const leaf = UnixFS.unmarshal(child.node.data)
file.addBlockSize(leaf.fileSize())
links.push(new DAGLink('', child.node.size, child.cid))
})
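Once the loop has recorded each leaf's unixfs size and collected a DAGLink per child, the pieces are usually wrapped into a parent dag-pb node. A minimal sketch, assuming the older ipld-dag-pb constructor that accepts (data, links):
// hypothetical follow-up: build the parent file node from the unixfs metadata
// and the leaf links gathered above
const parent = new DAGNode(file.marshal(), links)
// parent.size typically reflects the serialized node plus the linked leaf sizes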
it('should get a node added as CIDv1 with a CIDv0', async () => {
const input = Buffer.from(`TEST${Date.now()}`)
const res = await ipfs.add(input, { cidVersion: 1, rawLeaves: false })
const cidv1 = new CID(res[0].hash)
expect(cidv1.version).to.equal(1)
const cidv0 = cidv1.toV0()
const output = await ipfs.dag.get(cidv0)
expect(Unixfs.unmarshal(output.value.Data).data).to.eql(input)
})
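The toV0() conversion in this test only succeeds because rawLeaves: false keeps the block dag-pb-encoded with a sha2-256 multihash, the only combination a CIDv0 can express. A quick, self-contained illustration with the cids library (the hash below is the well-known empty unixfs directory):
const CID = require('cids')

// round-trip a CIDv0 through CIDv1 and back; only dag-pb + sha2-256 allows this
const v0 = new CID('QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn')
const v1 = v0.toV1()
console.log(v1.version)            // 1
console.log(v1.toV0().equals(v0))  // true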
const file = Unixfs.default('file');
for (let i = 0; i < results.length; i++) {
file.addBlockSize(results[i].size);
}
const indexFile = new DAGNode(file.marshal());
for (let i = 0; i < results.length; i++) {
indexFile.addLink({
Hash: results[i].hash,
Size: results[i].size
});
}
const indexFileResponse = await this.ipfs.dag.put(indexFile, WarpController.DEFAULT_NODE_TYPE);
const directory = Unixfs.default('directory');
for (let i = 0; i < results.length; i++) {
directory.addBlockSize(results[i].size);
}
directory.addBlockSize(file.fileSize());
const directoryNode = new DAGNode(directory.marshal());
for (let i = 0; i < results.length; i++) {
console.log(results[i]);
directoryNode.addLink({
Name: `file${i}`,
Hash: results[i].hash,
Size: results[i].size
});
}
directoryNode.addLink({
  // illustrative completion: the original presumably links the index file built above
  Name: 'index',
  Hash: indexFileResponse,
  Size: file.fileSize()
});
exports.switchType = (node, dirHandler, fileHandler) => {
const data = UnixFS.unmarshal(node.data)
const type = data.type
if (type === 'directory') {
return dirHandler()
}
if (type === 'file') {
return fileHandler()
}
return pull.error(new Error('Unknown node type'))
}
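switchType acts as a small dispatcher: it unmarshals the unixfs metadata and invokes the matching handler, falling back to a pull-stream error for anything else. A hedged usage sketch, assuming node is a dag-pb node whose data field holds unixfs metadata, that both handlers return pull-stream sources, and that the require path is illustrative:
const pull = require('pull-stream')
const UnixFS = require('ipfs-unixfs')
const { switchType } = require('./switch-type') // hypothetical module path

// choose a pull-stream source based on the node's unixfs type
const source = switchType(
  node,                                                   // a dag-pb node (assumed in scope)
  () => pull.values(['<directory>']),                     // directory handler
  () => pull.values([UnixFS.unmarshal(node.data).data])   // file handler
)

pull(source, pull.collect((err, chunks) => {
  if (err) throw err
  console.log(chunks)
}))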
module.exports = (cid, node, name, path, pathRest, resolve, size, dag, parent, depth, offset, length) => {
const accepts = pathRest[0]
if (accepts !== undefined && accepts !== path) {
return pull.empty()
}
let file
try {
file = UnixFS.unmarshal(node.data)
} catch (error) {
return pull.error(error)
}
const fileSize = size || file.fileSize()
if (offset < 0) {
return pull.error(new Error('Offset must be greater than or equal to 0'))
}
if (offset > fileSize) {
return pull.error(new Error('Offset must be less than the file size'))
}
if (length < 0) {
return pull.error(new Error('Length must be greater than or equal to 0'))