// NOTE: stray scraped text (code-scanner ad) — commented out so the file stays parseable.
// Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
if (rebuildTransactionInformation) {
updateAttrs = await computeTransactionInformation(
Object.assign(
{ transaction, platformId, env, req },
_.omit(payload, ['metadata', 'platformData', 'status'])
)
)
}
const now = new Date().toISOString()
if (status) {
updateAttrs.status = status
const newStatusHistoryStep = { status, date: now }
updateAttrs.statusHistory = raw('?::jsonb || "statusHistory"', [ // prepend a jsonb array using PostgreSQL `||` operator
JSON.stringify([newStatusHistoryStep])
])
}
if (metadata) {
updateAttrs.metadata = Transaction.rawJsonbMerge('metadata', metadata)
}
if (platformData) {
updateAttrs.platformData = Transaction.rawJsonbMerge('platformData', platformData)
}
const newTransaction = await Transaction.query().patchAndFetchById(transactionId, updateAttrs)
// Synchronize internal availability when core transaction properties
// (assetId, dates, quantity) or status are updated
if (rebuildTransactionInformation || status) {
transactionProcess = getDefaultTransactionProcess()
}
} else {
transactionProcess = getDefaultTransactionProcess()
}
const status = transactionProcess.cancelStatus
const now = new Date().toISOString()
const newStatusHistoryStep = { status, date: now }
const updateAttrs = {
cancellationReason,
cancelledDate: now,
status,
statusHistory: raw('?::jsonb || "statusHistory"', [ // prepend a jsonb array using PostgreSQL `||` operator
JSON.stringify([newStatusHistoryStep])
])
}
const transitionsMeta = computeTransitionsMeta({ transitions: transactionProcess.transitions, initState: transactionProcess.initStatus })
if (transitionsMeta.endStates.includes(status)) {
updateAttrs.completedDate = now
}
const updatedTransaction = await Transaction.query(trx).patchAndFetchById(transaction.id, updateAttrs)
try {
await availabilityRequester.send({
type: '_syncInternalAvailabilityTransaction',
transactionIds: [transaction.id],
platformId,
/**
 * Reuse the build previously created for this external CI build id, or create one.
 *
 * @param {Object} params
 * @param {Object} params.Build - Build model (Objection.js).
 * @param {Object} params.ScreenshotBucket - ScreenshotBucket model, forwarded to createBuild.
 * @param {Object} params.data - payload; data.externalBuildId identifies the external build.
 * @param {Object} params.repository - repository the build belongs to.
 * @returns {Promise<Object>} the existing (batch-count-incremented) or newly created build.
 */
async function useExistingBuild({ Build, ScreenshotBucket, data, repository }) {
  const existingBuild = await Build.query()
    .eager('compareScreenshotBucket')
    .findOne({
      'builds.repositoryId': repository.id,
      externalId: data.externalBuildId,
    })

  // @TODO Throw an error if batchCount is superior to expected
  if (!existingBuild) {
    // First batch for this external id: start an incomplete build.
    return createBuild({
      Build,
      ScreenshotBucket,
      data,
      repository,
      complete: false,
    })
  }

  // Another batch for the same build: bump the counter atomically in SQL.
  await existingBuild.$query().patch({ batchCount: raw('"batchCount" + 1') })
  return existingBuild
}
}
const referencingAssetsQuery = Asset.query()
.whereJsonHasAny('customAttributes', customAttribute.name)
/*
const referencingAssets = await referencingAssetsQuery.clone()
.select('id', raw(`"customAttributes"->'${customAttribute.name}' AS value`))
const indexedCustomAttributeValues = _.keyBy(referencingAssets, 'id')
*/
/*
For performance (memory) reasons we do not take full snapshots of assets.
We might just capture Custom Attribute previous value as above for event sourcing
but we don’t need to since previous 'asset__(updated|created)' event already did
*/
const referencingAssetIds = await referencingAssetsQuery
.patch({ customAttributes: raw(`"customAttributes" - '${customAttribute.name}'`) })
.returning('id')
// Delete as soon as possible since there may be many events to generate
await CustomAttribute.query().deleteById(customAttributeId)
// Not awaiting this non-critical step for potentially *much* faster response
// OK as long as required DELETE logic was executed (just above).
bluebird.map(referencingAssetIds, async (emptyAsset) => {
const assetId = emptyAsset.id // only property
try {
// Just faking removal from object for event sourcing consistency
// We currently expose no way to really remove a root jsonb column object property
const updateAttrs = { customAttributes: { [customAttribute.name]: null } }
const eventDelta = {
// Must have the structure returned as object Event.getUpdatedEventDeltas
.whereIn('assetId', assetsIds)
.whereNull('cancelledDate')
if (filterStartDate) {
const period = '\'[' +
filterStartDate + ',' +
(filterEndDate || '') +
')\''
queryBuilder
.where(builder => {
return builder
.whereNotNull('startDate')
.whereNotNull('endDate')
})
.where(raw(`${period} && tstzrange("startDate"::timestamptz, "endDate"::timestamptz)`))
}
const transactions = await queryBuilder
const indexedTransactions = _.groupBy(transactions, 'assetId')
return assetsIds.reduce((memo, assetId) => {
memo[assetId] = indexedTransactions[assetId] || []
return memo
}, {})
})
await bluebird.map(chunkAvailabilities, async (chunkAvailability) => {
const { startDate, endDate, quantity } = chunkAvailability
const dbStartDate = startDate || '-infinity'
const dbEndDate = endDate || 'infinity'
await InternalAvailability.query(trx).insert({
assetId: asset.id,
datesRange: raw('tstzrange(?, ?)', [dbStartDate, dbEndDate]),
startDate: dbStartDate,
endDate: dbEndDate,
quantity
})
}, { concurrency: 5 })
const currentUserId = getRealCurrentUserId(req)
const {
dynamicReadNamespaces
} = await namespaceRequester.send({
type: 'getDynamicNamespaces',
platformId,
env,
readNamespaces: req._readNamespaces,
editNamespaces: req._editNamespaces,
object: user,
currentUserId
})
const newUser = await User.query().patchAndFetchById(userId, {
organizations: raw(`"organizations" - '${organizationId}'`)
})
publisher.publish('userOrganizationLeft', {
user: newUser,
organizationId,
eventDate: newUser.updatedDate,
platformId,
env
})
return User.expose(newUser, { req, namespaces: dynamicReadNamespaces })
})