async function deleteTableEntities (tables) {
  const deletions = []
  for (let index = 0; index < tables.length; index++) {
    const table = tables[index]
    const query = new azure.TableQuery() // Azure Table Storage returns at most 1000 records per query
    let done = false
    let batch = 1
    while (!done) {
      const data = await tableService.queryEntitiesAsync(table, query, null)
      const entities = data.result.entries
      if (entities.length === 0) {
        done = true
        continue
      }
      console.log(`Found ${entities.length} entities to delete in batch ${batch++} from ${table}`)
      entities.forEach(entity => {
        deletions.push(tableService.deleteEntityAsync(table, entity))
      })
      // wait for this batch's deletions before re-querying, so the next query sees fresh results
      await Promise.all(deletions)
    }
  }
}
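The null third argument to queryEntitiesAsync is the continuation token, which this drain-and-delete loop can ignore; reading a large table without deleting requires threading the token through. A minimal sketch against the same promisified service (the helper name is illustrative, not part of the original):

async function queryAllEntities (table, query) {
  let entries = []
  let token = null
  do {
    // pass the previous page's continuation token; each page holds at most 1000 entities
    const data = await tableService.queryEntitiesAsync(table, query, token)
    entries = entries.concat(data.result.entries)
    token = data.result.continuationToken
  } while (token)
  return entries
}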
require('dotenv').config()
const azure = require('azure-storage')
const bluebird = require('bluebird')
const names = require('../../deploy/storage/tables-queues.json')
if (!process.env.AZURE_STORAGE_CONNECTION_STRING) {
  process.exitCode = -1
  console.error('env var $AZURE_STORAGE_CONNECTION_STRING is required')
}
const queueNames = names['queues']
const tableNames = names['tables']
const poisonQueues = queueNames.map(q => q + '-poison')
const allQueues = queueNames.concat(poisonQueues)
const tableService = getPromisifiedService(azure.createTableService())
const queueService = getPromisifiedService(azure.createQueueService())
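getPromisifiedService is not defined in this excerpt. Judging from its call sites, which use an Async suffix and read data.result.entries, it resolves each callback's result and response into an object. A minimal sketch of such a wrapper (an assumption, not the project's actual helper; the bluebird import above suggests the real one uses bluebird.promisifyAll, but plain promises make the resolved shape explicit):

function getPromisifiedService (service) {
  const methodNames = []
  for (const key in service) {
    if (typeof service[key] === 'function') methodNames.push(key)
  }
  for (const name of methodNames) {
    // each fooAsync(...) resolves with { result, response } or rejects with the callback error
    service[name + 'Async'] = (...args) =>
      new Promise((resolve, reject) => {
        service[name](...args, (error, result, response) => {
          if (error) return reject(error)
          resolve({ result, response })
        })
      })
  }
  return service
}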
// Blob-lease sample; assumes a local config module exposing connectionString
// and a uuid library, neither of which the excerpt shows
var storage = require('azure-storage');
var guid = require('uuid');
var config = require('./config');

function leaseBlob (callback) {
  // Create a blob client for interacting with the blob service from connection string
  // How to create a storage connection string - http://msdn.microsoft.com/en-us/library/azure/ee758697.aspx
  var blobService = storage.createBlobService(config.connectionString);
  var containerName = 'demoleaseblobcontainer-' + guid.v1();
  var blobName = 'exclusive';
  console.log('1. Create Container');
  blobService.createContainerIfNotExists(containerName, function (error) {
    if (error) return callback(error);
    console.log('2. Create blob');
    blobService.createBlockBlobFromText(containerName, blobName, 'blob created', function (error) {
      if (error) return callback(error);
      console.log('3. Acquire lease on blob');
      blobService.acquireLease(containerName, blobName, { leaseDuration: 15 }, function (error, leaseResult) {
        if (error) return callback(error);
        console.log('4. Acquired lease ' + leaseResult.id);
        // release the lease so the blob is writable again
        blobService.releaseLease(containerName, blobName, leaseResult.id, callback);
      });
    });
  });
}
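Invoked like the other callback-style samples:

leaseBlob(function (error) {
  if (error) return console.error('Lease sample failed:', error);
  console.log('Lease sample completed');
});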
var iotHubClient = ServiceClient.fromConnectionString(iotHubConnString);

// event hub alerts
var alerts = [];
var ehclient = EventHubClient.fromConnectionString(ehConnString, eventHubName);
ehclient.createReceiver('$Default', '0', { startAfterTime: Date.now() })
  .then(function (rx) {
    rx.on('errorReceived', function (err) { console.log(err); });
    rx.on('message', function (message) {
      alerts.push(message.body);
      alerts = alerts.slice(-5); // keep last 5
    });
  });

// table storage
var tableSvc = azure.createTableService(storageAccountName, storageAccountKey);
tableSvc.createTableIfNotExists(storageTable, function (err, result, response) {
  if (err) {
    console.log('error looking up table');
    console.log(err);
  }
});

// website setup
var app = express();
var port = nconf.get('port');
app.use(express.static('public'));
app.use(express.static('bower_components'));
app.use(bodyParser.json());

// app api
app.get('/api/alerts', function (req, res) {
  res.json(alerts); // serve the alerts buffered from the event hub
});
app.listen(port);
'use strict'
require('dotenv').config()
const azure = require('azure-storage')

const storageConnection = process.env.AZURE_STORAGE_CONNECTION_STRING
if (!storageConnection) {
  process.exitCode = -1
  console.error('env var $AZURE_STORAGE_CONNECTION_STRING is required')
}

const queueName = process.env.QUEUE_NAME || 'completed-checks'
const permissions = azure.QueueUtilities.SharedAccessPermissions.ADD
var queueService = azure.createQueueService(storageConnection)
// setCors(queueService)

const generateSasToken = () => {
  // Create a SAS token that expires in an hour
  // Set start time to five minutes ago to avoid clock skew.
  var startDate = new Date()
  startDate.setMinutes(startDate.getMinutes() - 5)
  var expiryDate = new Date(startDate)
  expiryDate.setMinutes(startDate.getMinutes() + 60)
  var sharedAccessPolicy = {
    AccessPolicy: {
      Permissions: permissions,
      Start: startDate,
      Expiry: expiryDate
    }
  }
  return queueService.generateSharedAccessSignature(queueName, sharedAccessPolicy)
}
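A client can then reach the queue with that token instead of account credentials; a minimal sketch, where QUEUE_HOST (e.g. https://youraccount.queue.core.windows.net) is an assumed environment variable rather than part of the sample:

const sasToken = generateSasToken()
// authenticate with the SAS token alone; the ADD permission only allows enqueuing
const sasQueueService = azure.createQueueServiceWithSas(process.env.QUEUE_HOST, sasToken)
sasQueueService.createMessage(queueName, 'completed-check payload', error => {
  if (error) console.error('createMessage failed', error)
})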
console.log('Size:', size);
const stream = fs.createReadStream(file);
const [sha1hash, sha256hash] = await Promise.all([hashStream('sha1', stream), hashStream('sha256', stream)]);
console.log('SHA1:', sha1hash);
console.log('SHA256:', sha256hash);
const blobName = commit + '/' + name;
const storageAccount = process.env['AZURE_STORAGE_ACCOUNT_2']!;
const blobService = azure.createBlobService(storageAccount, process.env['AZURE_STORAGE_ACCESS_KEY_2']!)
  .withFilter(new azure.ExponentialRetryPolicyFilter(20));
const mooncakeBlobService = azure.createBlobService(storageAccount, process.env['MOONCAKE_STORAGE_ACCESS_KEY']!, `${storageAccount}.blob.core.chinacloudapi.cn`)
  .withFilter(new azure.ExponentialRetryPolicyFilter(20));
// mooncake is fussy and far away, this is needed!
mooncakeBlobService.defaultClientRequestTimeoutInMs = 10 * 60 * 1000;
await Promise.all([
  assertContainer(blobService, quality),
  assertContainer(mooncakeBlobService, quality)
]);
const [blobExists, mooncakeBlobExists] = await Promise.all([
  doesAssetExist(blobService, quality, blobName),
  doesAssetExist(mooncakeBlobService, quality, blobName)
]);
const promises: Array<Promise<void>> = [];
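hashStream is not shown in this excerpt; a minimal sketch (name and shape assumed from the call sites) that feeds the stream's chunks through Node's crypto:

const crypto = require('crypto');

function hashStream(hashName, stream) {
  return new Promise((resolve, reject) => {
    const hash = crypto.createHash(hashName);
    stream
      .on('data', chunk => hash.update(chunk))
      .on('error', reject)
      .on('end', () => resolve(hash.digest('hex')));
  });
}

Both calls above attach listeners to the same read stream, so each hash sees every chunk once.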
storageClient.deleteEntity(tableName, customer, function entitiesQueried (error, result) {
  if (error) return callback(error);
  console.log("  deleteEntity succeeded.");

  // Demonstrates upsert and batch table operations
  console.log("5. Inserting a batch of entities.");
  // create batch operation
  var batch = new storage.TableBatch();
  var lastName = "Smith";
  // The following code generates test data for use during the query samples.
  for (var i = 0; i < 100; i++) {
    var name = zeroPaddingString(i, 4);
    var customerToInsert = createCustomerEntityDescriptor(lastName, name, name + "@contoso.com", "425-555-" + name);
    batch.insertEntity(customerToInsert);
  }

  // Demonstrates inserting a large batch of entities. Some considerations for batch operations:
  // 1. You can perform updates, deletes, and inserts in the same single batch operation.
  // 2. A single batch operation can include up to 100 entities.
  // 3. All entities in a single batch operation must have the same partition key.
  // 4. While it is possible to perform a query as a batch operation, it must be the only operation in the batch.
  // 5. Batch size must be <= 4MB
  storageClient.executeBatch(tableName, batch, function (error) {
    if (error) return callback(error);
    callback();
  });
});
tableService.createTableIfNotExists('Repositories', error => {
  if (error) {
    console.error(error)
    return
  }
  // creates a batch of operations to be executed
  var batch = new storage.TableBatch()
  for (var i = 0; i < input.length; i++) {
    var repository = input[i]
    // creates an operation to add the repository to Table Storage
    batch.insertOrReplaceEntity({
      PartitionKey: { _: 'Default' },
      RowKey: { _: repository.id.toString() },
      OpenedIssues: { _: repository.openedIssues },
      RepositoryName: { _: repository.name }
    })
  }
  // execute the batch of operations
  tableService.executeBatch('Repositories', batch, error => {
    if (error) {
      console.error(error)
    }
  })
})
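A single batch is capped at 100 entities that must share a partition key (see the considerations above), so inputs larger than that need splitting; a minimal sketch (the helper name is illustrative) under the same single-partition assumption:

function insertInBatches (tableService, tableName, entities, done) {
  const batches = []
  // chunk into groups of 100; every entity here must share one partition key
  for (let i = 0; i < entities.length; i += 100) {
    const batch = new storage.TableBatch()
    entities.slice(i, i + 100).forEach(entity => batch.insertOrReplaceEntity(entity))
    batches.push(batch)
  }
  let remaining = batches.length
  if (remaining === 0) return done()
  let failed = false
  batches.forEach(batch => {
    tableService.executeBatch(tableName, batch, error => {
      if (failed) return
      if (error) { failed = true; return done(error) }
      if (--remaining === 0) done()
    })
  })
}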