Secure your code as it's written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.
// NOTE(review): fragment — the enclosing function (presumably a `setup`/
// `configure` factory returning `subClass`) is outside this excerpt.
// Set azure table name used by all entity operations on this subclass.
subClass.prototype.__table = options.table;
// Create an azure table client
var client = null;
if (options.account) {
// options.account given: table credentials are obtained as SAS tokens
// from the auth service (auth.taskcluster.net) rather than supplied
// directly by the caller.
assert(typeof(options.account) === 'string',
"Expected options.account to be a string, or undefined");
// Create auth client to fetch SAS from auth.taskcluster.net
var auth = new taskcluster.Auth({
credentials: options.credentials,
baseUrl: options.authBaseUrl
});
// Create azure table client with logic for fetching a SAS on demand;
// minSASAuthExpiry presumably controls how early a fresh SAS is requested
// before the current one expires — TODO confirm against the azure client docs.
client = new azure.Table({
timeout: AZURE_TABLE_TIMEOUT,
agent: options.agent,
accountId: options.account,
minSASAuthExpiry: options.minSASAuthExpiry,
// Invoked by the azure client whenever a (new) SAS token is required.
sas: function() {
return auth.azureTableSAS(
options.account,
options.table
).then(function(result) {
// Unwrap the `sas` string from the auth service response.
return result.sas;
});
}
});
} else {
// NOTE(review): this `else` branch mixes entity-client credential checks
// with what appears to be a QueueService constructor body — likely a
// scraping artifact joining two different files; verify against upstream.
// Create client using credentials already present
assert(options.credentials.accountName, "Missing accountName");
// Prefix must be azure-queue-name safe: alphanumeric start, then
// alphanumerics or dashes, and short enough to leave room for suffixes.
assert(/^[A-Za-z0-9][A-Za-z0-9-]*$/.test(options.prefix), 'Invalid prefix');
assert(options.prefix.length <= 6, 'Prefix is too long');
assert(options.resolvedQueue, 'A resolvedQueue name must be given');
assert(options.claimQueue, 'A claimQueue name must be given');
assert(options.deadlineQueue, 'A deadlineQueue name must be given');
assert(options.monitor, 'A monitor instance must be given');
// Fill in defaults without mutating the caller's options object.
options = _.defaults({}, options, {
pendingPollTimeout: 5 * 60 * 1000,
deadlineDelay: 10 * 60 * 1000,
});
this.prefix = options.prefix;
this.pendingPollTimeout = options.pendingPollTimeout;
this.monitor = options.monitor;
this.client = new azure.Queue({
accountId: options.credentials.accountName,
accessKey: options.credentials.accountKey,
timeout: AZURE_QUEUE_TIMEOUT,
});
// Store account name for use in SAS signed Urls
this.accountName = options.credentials.accountName;
// Promises that queues are created, return mapping from priority to
// azure queue names.
this.queues = {};
// Resets queues cache every 25 hours, this ensures that meta-data is kept
// up-to-date with a last_used field no more than 48 hours behind.
// NOTE(review): the interval handle is discarded here, unlike the variant
// at the end of this file which stores it for later clearInterval — confirm
// whether this timer can keep the process alive.
setInterval(() => {this.queues = {};}, 25 * 60 * 60 * 1000);
// NOTE(review): fragment — this is the body of an API handler issuing
// table SAS credentials; the surrounding declaration is not visible here.
// A caller with read-write scope may also be granted read-only access.
await req.authorize({
account,
table: tableName,
level,
levelIsReadOnly: level === 'read-only',
});
// Check that the account exists in our configured credential map.
if (!this.azureAccounts[account]) {
return res.reportError('ResourceNotFound',
`Account '${account}' not found, can't delegate access`);
}
// Construct client with the account's master access key.
let table = new azure.Table({
accountId: account,
accessKey: this.azureAccounts[account],
});
// Create table, ignore error, if it already exists. Creation is only
// attempted for read-write access, since read-only callers cannot need it.
if (level === 'read-write') {
// only try to create if we haven't done so in this process recently
// (cache entries expire after 6 hours) to avoid a round-trip per request.
const key = `${account}/${tableName}`;
if (!tableLastCreated[key] || new Date() - tableLastCreated[key] > 6 * 3600 * 1000) {
try {
await table.createTable(tableName);
} catch (err) {
// TableAlreadyExists is expected and benign; rethrow anything else.
if (err.code !== 'TableAlreadyExists') {
throw err;
}
}
// NOTE(review): this span duplicates the snippet at the top of this file
// (same entity-subclass setup) — an artifact of how the excerpts were
// collected, not a second code path.
// Set azure table name
subClass.prototype.__table = options.table;
// Create an azure table client
var client = null;
if (options.account) {
// options.account given: fetch SAS credentials from auth.taskcluster.net
// instead of using directly-supplied storage credentials.
assert(typeof(options.account) === 'string',
"Expected options.account to be a string, or undefined");
// Create auth client to fetch SAS from auth.taskcluster.net
var auth = new taskcluster.Auth({
credentials: options.credentials,
baseUrl: options.authBaseUrl
});
// Create azure table client with logic for fetching a SAS on demand.
client = new azure.Table({
timeout: AZURE_TABLE_TIMEOUT,
agent: options.agent,
accountId: options.account,
minSASAuthExpiry: options.minSASAuthExpiry,
// Called by the azure client whenever a (new) SAS token is required.
sas: function() {
return auth.azureTableSAS(
options.account,
options.table
).then(function(result) {
// Unwrap the `sas` string from the auth service response.
return result.sas;
});
}
});
} else {
// Create client using credentials already present
assert(options.credentials.accountName, "Missing accountName");
const backupTable = async ({azureCreds, s3, bucket, tableName, utils}) => {
const stream = new zlib.createGzip();
const table = new azure.Table(azureCreds);
// Versioning is enabled in the backups bucket so we just overwrite the
// previous backup every time. The bucket is configured to delete previous
// versions after N days, but the current version will never be deleted.
const upload = s3.upload({
Bucket: bucket,
Key: `${azureCreds.accountId}/table/${tableName}`,
Body: stream,
StorageClass: 'STANDARD_IA',
}).promise();
const processEntities = entities => entities.map(
entity => stream.write(JSON.stringify(entity) + '\n'));
let count = 0;
let nextUpdateCount = 1000;
// NOTE(review): fragment — an older variant of the table-SAS handler using
// the legacy `req.satisfies` authorizer and raw `res.status(...).json(...)`
// error responses instead of `req.authorize`/`res.reportError`.
if (!req.satisfies({
account: account,
table: tableName
})) {
// req.satisfies has already sent the authorization-failure response.
return;
}
// Check that the account exists in our configured credential map.
if (!this.azureAccounts[account]) {
return res.status(404).json({
message: "Account '" + account + "' not found, can't delegate access"
});
}
// Construct client with the account's master access key.
var table = new azure.Table({
accountId: account,
accessKey: this.azureAccounts[account]
});
// Create table; ignore the error if it already exists.
try {
await table.createTable(tableName);
} catch (err) {
// TableAlreadyExists is expected and benign; rethrow anything else.
if (err.code !== 'TableAlreadyExists') {
throw err;
}
}
// Construct SAS valid for 25 minutes from now.
var expiry = new Date(Date.now() + 25 * 60 * 1000);
// NOTE(review): the options object passed to table.sas is truncated in
// this excerpt.
var sas = table.sas(tableName, {
// NOTE(review): fragment — entity-subclass configuration choosing between
// an in-memory test backend and a real azure table client; the enclosing
// function and the condition guarding this branch are not visible here.
if (!inmemory) {
inmemory = require('./inmemory'); // lazy-loaded
}
// In-memory backend: same prototype surface as the azure-backed path below,
// but no real client is needed.
subClass.prototype.__table = options.tableName;
subClass.prototype.__filterBuilder = inmemory.appendFilter;
subClass.prototype.__aux = new inmemory.InMemoryWrapper(options.tableName);
subClass.prototype.__client = {};
return subClass;
}
// Set azure table name
subClass.prototype.__table = options.tableName;
// Create an azure table client; credentials are merged in so explicit
// timeout/agent options take precedence.
const client = new azure.Table(_.defaults({
timeout: AZURE_TABLE_TIMEOUT,
agent: options.agent,
}, options.credentials));
// Store reference to azure table client
subClass.prototype.__client = client;
// set the filter builder
subClass.prototype.__filterBuilder = entityfilters.appendFilter;
// Create table client wrapper, to record statistics and bind table name.
// NOTE(review): the method list below is truncated in this excerpt.
subClass.prototype.__aux = {};
[
'createTable',
'deleteTable',
'getEntity',
// Get parameters
let account = req.params.account;
let container = req.params.container;
let level = req.params.level;
// Check that the client is authorized to access given account and container
await req.authorize({level, account, container, levelIsReadOnly: level === 'read-only'});
// Check that the account exists
if (!this.azureAccounts[account]) {
return res.reportError('ResourceNotFound',
`Account '${level}' not found, can't delegate access.`);
}
// Construct client
let blob = new azure.Blob({
accountId: account,
accessKey: this.azureAccounts[account],
});
// Create container ignore error, if it already exists
if (level === 'read-write') {
const key = `${account}/${container}`;
if (!containerLastCreated[key] || new Date() - containerLastCreated[key] > 6 * 3600 * 1000) {
try {
await blob.createContainer(container);
} catch (err) {
if (err.code !== 'ContainerAlreadyExists') {
throw err;
}
}
containerLastCreated[key] = new Date();
const backupContainer = async ({azureCreds, s3, bucket, containerName, utils}) => {
const stream = new zlib.createGzip();
const container = new azure.Blob(azureCreds);
// Versioning is enabled in the backups bucket so we just overwrite the
// previous backup every time. The bucket is configured to delete previous
// versions after N days, but the current version will never be deleted.
let upload = s3.upload({
Bucket: bucket,
Key: `${azureCreds.accountId}/container/${containerName}`,
Body: stream,
StorageClass: 'STANDARD_IA',
}).promise();
let count = 0;
let nextUpdateCount = 1000;
let marker;
do {
let results = await container.listBlobs(containerName, {marker});
// NOTE(review): fragment — a newer variant of the QueueService constructor
// seen earlier in this file: it supports a fake in-process client for tests,
// uses accountId/accessKey credential names, and keeps the interval handle.
assert(options.prefix.length <= 6, 'Prefix is too long');
assert(options.resolvedQueue, 'A resolvedQueue name must be given');
assert(options.claimQueue, 'A claimQueue name must be given');
assert(options.deadlineQueue, 'A deadlineQueue name must be given');
assert(options.monitor, 'A monitor instance must be given');
// Fill in defaults without mutating the caller's options object.
options = _.defaults({}, options, {
deadlineDelay: 10 * 60 * 1000,
});
this.prefix = options.prefix;
this.monitor = options.monitor;
// credentials.fake selects an in-process stand-in for azure queues,
// used by tests so no real storage account is required.
if (options.credentials.fake) {
this.client = new FakeQueueClient();
} else {
this.client = new azure.Queue({
accountId: options.credentials.accountId,
accessKey: options.credentials.accessKey,
timeout: AZURE_QUEUE_TIMEOUT,
});
}
// Store account name for use in SAS signed Urls
this.accountId = options.credentials.accountId;
// Promises that queues are created, return mapping from priority to
// azure queue names.
this.queues = {};
// Resets queues cache every 25 hours, this ensures that meta-data is kept
// up-to-date with a last_used field no more than 48 hours behind.
// The handle is kept so the interval can be cleared on shutdown.
this.queueResetInterval = setInterval(() => {this.queues = {};}, 25 * 60 * 60 * 1000);