Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
]
// Optionally run only one slice of the test suites, selected via "<pos>/<total>"
// (e.g. --group 2/3 runs the second of three contiguous groups).
if (groupArg) {
  const [groupPos, groupTotal] = groupArg.split('/').map(p => parseInt(p, 10))
  // Each group gets ceil(total / groups) suites; the final group may be smaller.
  const numPerGroup = Math.ceil(testNames.length / groupTotal)
  // Fix: the previous offset math subtracted 1 for every group after the first
  // (and "compensated" only when the suite count was odd), which made adjacent
  // groups overlap by one suite and silently dropped the last suite whenever
  // the suite count was even. A plain contiguous partition is correct.
  const offset = (groupPos - 1) * numPerGroup
  testNames = testNames.slice(offset, offset + numPerGroup)
}
// Print the final list of suites this worker will execute, one per line.
console.log('Running tests:', '\n', ...testNames.map(name => `${name}\n`))
// Bound how many jest child processes run concurrently; capacity is sized to
// the total suite count so the semaphore's internal queue never grows.
const sema = new Sema(concurrency, { capacity: testNames.length })
// Resolve the jest CLI entry point relative to the installed jest-cli package
// (require.resolve on the package.json gives the package root).
const jestPath = path.join(
path.dirname(require.resolve('jest-cli/package')),
'bin/jest.js'
)
// Spawned child processes — presumably tracked so they can be killed on
// exit/failure; confirm against the cleanup code downstream.
const children = new Set()
const runTest = (test = '') =>
new Promise((resolve, reject) => {
const start = new Date().getTime()
const child = spawn(
'node',
[jestPath, '--runInBand', '--forceExit', '--verbose', test],
{
stdio: 'inherit',
}
)
return pseudoLayer;
}
/** Options accepted by createLambdaFromPseudoLayers(). */
interface CreateLambdaFromPseudoLayersOptions {
// Files to package into the lambda zip, keyed by destination path.
files: Files;
// Pre-built pseudo layers whose contents are merged into the same zip.
layers: PseudoLayer[];
// Lambda handler entry point (e.g. "index.handler").
handler: string;
// Lambda runtime identifier (e.g. "nodejs14.x") — format not validated here.
runtime: string;
// Optional memory limit — presumably MB, per Lambda convention; confirm at call site.
memory?: number;
// Optional maximum execution duration — presumably seconds; confirm at call site.
maxDuration?: number;
// Environment variables for the function; defaults to {} in the implementation.
environment?: { [name: string]: string };
}
// measured with 1, 2, 5, 10, and `os.cpus().length || 5`
// and sema(1) produced the best results
// Serializes lambda zip creation: only one createLambdaFromPseudoLayers call
// runs at a time (a Sema with 1 permit acts as a mutex).
const createLambdaSema = new Sema(1);
export async function createLambdaFromPseudoLayers({
files,
layers,
handler,
runtime,
memory,
maxDuration,
environment = {},
}: CreateLambdaFromPseudoLayersOptions) {
await createLambdaSema.acquire();
const zipFile = new ZipFile();
const addedFiles = new Set();
const names = Object.keys(files).sort();
const symlinkTargets = new Map();
dest: string,
{
concurrency = 32,
overwrite = false,
filter = () => true,
}: {
concurrency?: number
overwrite?: boolean
filter?(path: string): boolean
} = {}
) {
const cwdPath = process.cwd()
const from = path.resolve(cwdPath, source)
const to = path.resolve(cwdPath, dest)
const sema = new Sema(concurrency)
async function _copy(item: string) {
const target = item.replace(from, to)
const stats = await stat(item)
await sema.acquire()
if (stats.isDirectory()) {
try {
await mkdir(target)
} catch (err) {
// do not throw `folder already exists` errors
if (err.code !== 'EEXIST') {
throw err
}
}
import { DeploymentFile } from './hashes';
import { parse as parseUrl } from 'url';
import fetch_, { RequestInit } from 'node-fetch';
import { join, sep } from 'path';
import qs from 'querystring';
import ignore from 'ignore';
import { pkgVersion } from '../pkg';
import { NowClientOptions, DeploymentOptions, NowConfig } from '../types';
import { Sema } from 'async-sema';
import { readFile } from 'fs-extra';
// Caps concurrent operations in this module at 10 (acquired/released around
// the API calls below — usage not fully visible in this chunk).
const semaphore = new Sema(10);
// Legacy "Now" v2 API endpoints.
export const API_FILES = '/v2/now/files';
export const API_DELETE_DEPLOYMENTS_LEGACY = '/v2/now/deployments';
export const EVENTS = new Set([
// File events
'hashes-calculated',
'file_count',
'file-uploaded',
'all-files-uploaded',
// Deployment events
'created',
'ready',
'alias-assigned',
'warning',
'error',
constructor(url, { tls = true, debug } = {}) {
// We use multiple contexts because each context represent one connection
// With nginx, we're limited to 1000 requests before a connection is closed
// http://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_requests
// To get arround this, we keep track of requests made on a connection. when we're about to hit 1000
// we start up a new connection, and re-route all future traffic through the new connection
// and when the final request from the old connection resolves, we auto-close the old connection
this._contexts = [context()];
this._currContext = this._contexts[0];
this._currContext.fetchesMade = 0;
this._currContext.ongoingFetches = 0;
this._url = url;
const parsed = parse(url);
this._protocol = parsed.protocol;
this._sema = new Sema(20);
this._output = createOutput({ debug });
if (tls) {
this._initAgent();
}
}
// Demonstrates async-sema's initFn/drain pattern as a small connection pool:
// up to 3 redis clients are created lazily, one is borrowed per acquire(),
// and drain() returns every initialized client so each can be closed.
async function f() {
  const red = new Sema(3, {
    initFn: () => redis().createClient(process.env.REDIS_URL)
  });
  // acquire() hands out one pooled client (creating it on first use).
  const db = await red.acquire();
  console.log(await db.get('id'));
  red.release(db);
  // drain() yields all initialized clients; quit each so the process can exit.
  // (Was `dbs.map(db => ...)`: map-for-side-effects with a shadowed `db`.)
  const dbs = await red.drain();
  for (const client of dbs) {
    client.quit();
  }
}
f()
/**
 * Demo: RateLimit with uniformDistribution spreads the 5-per-second allowance
 * evenly across each second instead of permitting bursts, then reports the
 * measured request rate.
 */
async function h() {
  console.log('Uniform distribution of requests over time');
  const total = 50;
  const limit = RateLimit(5, { uniformDistribution: true });
  const startedAt = process.hrtime();
  let sent = 0;
  while (sent < total) {
    await limit();
    process.stdout.write('.');
    sent++;
  }
  process.stdout.write('\n');
  // hrtime delta is [seconds, nanoseconds]; convert to fractional seconds.
  const delta = process.hrtime(startedAt);
  const seconds = (delta[0] * 1000 + delta[1] / 1e6) / 1000;
  const perSecond = total / seconds;
  console.log(perSecond.toFixed(3) + ' req/s');
}
/**
 * Demo: RateLimit with a custom timeUnit — 5 requests per minute — then
 * reports the measured per-minute rate.
 */
async function g() {
  console.log('Custom rate limit time unit');
  const total = 20;
  const limit = RateLimit(5, { timeUnit: 60 * 1000 });
  const startedAt = process.hrtime();
  let done = 0;
  while (done < total) {
    await limit();
    process.stdout.write('.');
    done++;
  }
  process.stdout.write('\n');
  // hrtime delta is [seconds, nanoseconds]; convert to fractional seconds.
  const delta = process.hrtime(startedAt);
  const seconds = (delta[0] * 1000 + delta[1] / 1e6) / 1000;
  const perMinute = total / (seconds / 60);
  console.log(perMinute.toFixed(3) + ' req/min');
}
/**
 * Demo: naive rate limiting — at most 5 calls per second, bursts allowed —
 * then reports the measured request rate.
 */
async function f() {
  console.log('Naive requests per second rate limiting');
  const total = 50;
  const limit = RateLimit(5);
  const startedAt = process.hrtime();
  for (let sent = 0; sent < total; sent++) {
    await limit();
    process.stdout.write('.');
  }
  process.stdout.write('\n');
  // hrtime delta is [seconds, nanoseconds]; convert to fractional seconds.
  const delta = process.hrtime(startedAt);
  const seconds = (delta[0] * 1000 + delta[1] / 1e6) / 1000;
  console.log((total / seconds).toFixed(3) + ' req/s');
}
/**
 * Demo: 100 tasks contend for a 13-permit semaphore. Each task logs its
 * number and the current queue depth, sleeps a random 500–3000 ms, then
 * releases its permit. Prints 'hello' once all tasks finish.
 */
async function f() {
  const numbers = Array.from({ length: 100 }, (_, i) => i + 1);
  // capacity sized to the task count so the wait queue never reallocates.
  const sem = new Sema(13, { capacity: numbers.length });
  const run = async item => {
    await sem.acquire();
    console.log(item, sem.nrWaiting());
    await new Promise(resolve =>
      setTimeout(resolve, getRnd(500, 3000))
    );
    sem.release();
  };
  await Promise.all(numbers.map(run));
  console.log('hello');
}
f()