/**
* Job describes a particular job.
*
* A Job always has a name and an image. The name is used to reference this
* job in relation to other jobs in the same event. The image corresponds to a
* container image that will be executed as part of this job.
*
* A Job may also have one or more tasks associated with it. Tasks are run
* (in order) inside of the image. When no tasks are supplied, the image is
* executed as-is.
*/
export class Job extends jobImpl.Job {
jr: JobRunner;
run(): Promise<jobImpl.Result> {
this.jr = new JobRunner().init(this, currentEvent, currentProject, process.env.BRIGADE_SECRET_KEY_REF == 'true');
this._podName = this.jr.name;
return this.jr.run().catch(err => {
// Wrap the message to give clear context.
console.error(err);
let msg = `job ${this.name}(${this.jr.name}): ${err}`;
return Promise.reject(new Error(msg));
});
}
logs(): Promise<string> {
return this.jr.logs();
}
}
// For all other dot-slash-prefixed requires, resolve as usual.
// NOTE: module-alias will not allow us to just return "." here, because
// it uses path.join under the hood, which collapses "./foo" down to just
// "foo", for which the module resolution semantics are different. So,
// return the directory of the requiring module, which gives the same result
// as ".".
return path.dirname(fromPath);
});
moduleAlias();
require(script);
}
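// A self-contained sketch of the path.join pitfall described above (paths are
// illustrative). "./mylib" and "mylib" have different require() semantics: the
// former resolves relative to the requiring file, the latter searches node_modules.
import * as path from "path";
console.log(path.join(".", "mylib")); // "mylib" -- the relative prefix is collapsed
console.log(path.join("/app/src", "mylib")); // "/app/src/mylib" -- still a file path
console.log(path.dirname("/app/src/brigade.js")); // "/app/src" -- resolves like "./"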
// Log level may come in lowercased ('log', 'info', etc.) when run by the brig CLI.
const logLevel = LogLevel[(process.env.BRIGADE_LOG_LEVEL || "log").toUpperCase()];
const logger = new ContextLogger([], logLevel);
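// LogLevel is defined in the logger module; assuming it is a numeric enum
// along these lines (a sketch, not the actual definition), string indexing
// performs a name-to-value lookup, which is why the name is uppercased first.
enum LogLevelSketch { ERROR, WARN, LOG, INFO }
console.log(LogLevelSketch["LOG"]); // 2
console.log(LogLevelSketch["INFO"]); // 3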
const version = require("../package.json").version;
logger.log(`brigade-worker version: ${version}`);
const requiredEnvVar = (name: string): string => {
if (!process.env[name]) {
logger.log(`Missing required env ${name}`);
process.exit(1);
}
return process.env[name];
};
const projectID: string = requiredEnvVar("BRIGADE_PROJECT_ID");
const projectNamespace: string = requiredEnvVar("BRIGADE_PROJECT_NAMESPACE");
const defaultULID = ulid().toLocaleLowerCase();
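// ULIDs are canonically uppercase Crockford base32, while Kubernetes object
// names must be lowercase RFC 1123 names -- presumably why the ID is
// lowercased before being embedded in build and worker names. A quick check:
const rfc1123Label = /^[a-z0-9]([-a-z0-9]*[a-z0-9])?$/;
console.log(rfc1123Label.test(ulid())); // false -- uppercase characters
console.log(rfc1123Label.test(defaultULID)); // true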
public init(job: T, e: BrigadeEvent, project: Project, allowSecretKeyRef: boolean = true) {
this.options = Object.assign({}, options);
this.event = e;
this.logger = new ContextLogger("k8s", e.logLevel);
this.job = job;
this.project = project;
this.client = defaultClient;
this.serviceAccount = job.serviceAccount || this.options.serviceAccount;
this.pod = undefined;
this.cancel = false;
this.reconnect = false;
// $JOB-$BUILD
this.name = `${job.name}-${this.event.buildID}`;
let commit = e.revision.commit || "master";
let secName = this.name;
let runnerName = this.name;
this.secret = newSecret(secName);
this.runner = newRunnerPod(
/**
* App assumes that it has full control of the process. It acts as a top-level
* error handler and will exit the process when unhandled rejections or
* uncaught errors occur.
*/
export class App {
/**
* exitOnError controls whether the app will exit when an uncaught exception or unhandled rejection occurs.
*
* exitOnError can be set to false in order to run tests on the error handling.
* In general, though, it should be left on. In some cases, by the time the
* process trap is invoked, the runtime is not in a good state to continue.
*/
public exitOnError: boolean = true;
protected errorsHandled: boolean = false;
protected logger: Logger = new ContextLogger("app");
protected lastEvent: events.BrigadeEvent;
protected projectID: string;
protected projectNS: string;
// If project loading fails, this default value may be used. In all other
// cases, it is overwritten by an actual project.
protected proj: events.Project = new events.Project();
// true if the "after" event has fired.
protected afterHasFired: boolean = false;
protected storageIsDestroyed: boolean = false;
/**
* loadProject is a function that loads projects.
*/
public loadProject: ProjectLoader = k8s.loadProject;
/**
* buildStorage controls the per-build storage layer.
*/
public buildStorage: BuildStorage = new k8s.BuildStorage();
protected exitCode: number = 0;
}
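// A sketch of how exitOnError and loadProject might be overridden in a test
// harness. The zero-argument constructor and the ProjectLoader shape are
// assumptions, not the actual App API.
const testApp = new App();
testApp.exitOnError = false; // keep the test process alive when errors are trapped
testApp.loadProject = async () => new events.Project(); // stub out the k8s loader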
const vname = "build-storage";
this.runner.spec.volumes.push({
name: vname,
persistentVolumeClaim: { claimName: e.workerID.toLowerCase() }
} as kubernetes.V1Volume);
let mnt = volumeMount(vname, job.storage.path);
this.runner.spec.containers[0].volumeMounts.push(mnt);
}
// If the job needs access to a docker daemon, mount in the host's docker socket
if (job.docker.enabled && project.allowHostMounts) {
var dockerVol = new kubernetes.V1Volume();
var dockerMount = new kubernetes.V1VolumeMount();
var hostPath = new kubernetes.V1HostPathVolumeSource();
hostPath.path = jobs.dockerSocketMountPath;
dockerVol.name = jobs.dockerSocketMountName;
dockerVol.hostPath = hostPath;
dockerMount.name = jobs.dockerSocketMountName;
dockerMount.mountPath = jobs.dockerSocketMountPath;
this.runner.spec.volumes.push(dockerVol);
for (let i = 0; i < this.runner.spec.containers.length; i++) {
this.runner.spec.containers[i].volumeMounts.push(dockerMount);
}
}
// If the job defines volumes, add them to the pod's volume list.
// If the volume type is `hostPath`, first check if the project allows host mounts
// and throw an error if it it does not.
for (let v of job.volumes) {
if (v.hostPath != undefined && !project.allowHostMounts) {
throw new Error(`allowHostMounts is false in this project, not mounting ${v.hostPath.path}`);
}
name: vname,
persistentVolumeClaim: { claimName: e.workerID.toLowerCase() }
} as kubernetes.V1Volume);
let mnt = volumeMount(vname, job.storage.path);
this.runner.spec.containers[0].volumeMounts.push(mnt);
}
// If the job needs access to a docker daemon, mount in the host's docker socket
if (job.docker.enabled && project.allowHostMounts) {
var dockerVol = new kubernetes.V1Volume();
var dockerMount = new kubernetes.V1VolumeMount();
var hostPath = new kubernetes.V1HostPathVolumeSource();
hostPath.path = jobs.dockerSocketMountPath;
dockerVol.name = jobs.dockerSocketMountName;
dockerVol.hostPath = hostPath;
dockerMount.name = jobs.dockerSocketMountName;
dockerMount.mountPath = jobs.dockerSocketMountPath;
this.runner.spec.volumes.push(dockerVol);
for (let i = 0; i < this.runner.spec.containers.length; i++) {
this.runner.spec.containers[i].volumeMounts.push(dockerMount);
}
}
// If the job defines volumes, add them to the pod's volume list.
// If the volume type is `hostPath`, first check if the project allows host mounts
// and throw an error if it it does not.
for (let v of job.volumes) {
if (v.hostPath != undefined && !project.allowHostMounts) {
throw new Error(`allowHostMounts is false in this project, not mounting ${v.hostPath.path}`);
}
this.runner.spec.volumes.push(v);
}
if (job.storage.enabled) {
const vname = "build-storage";
this.runner.spec.volumes.push({
name: vname,
persistentVolumeClaim: { claimName: e.workerID.toLowerCase() }
} as kubernetes.V1Volume);
let mnt = volumeMount(vname, job.storage.path);
this.runner.spec.containers[0].volumeMounts.push(mnt);
}
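// volumeMount() is defined elsewhere in this module; a minimal sketch of the
// shape it presumably returns for the push above. The mount name must match
// the "build-storage" volume name, and mountPath comes from job.storage.path.
function volumeMountSketch(name: string, mountPath: string): kubernetes.V1VolumeMount {
const m = new kubernetes.V1VolumeMount();
m.name = name;
m.mountPath = mountPath;
return m;
}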
// If the job needs access to a docker daemon, mount in the host's docker socket
if (job.docker.enabled && project.allowHostMounts) {
const dockerVol = new kubernetes.V1Volume();
const dockerMount = new kubernetes.V1VolumeMount();
const hostPath = new kubernetes.V1HostPathVolumeSource();
hostPath.path = jobs.dockerSocketMountPath;
dockerVol.name = jobs.dockerSocketMountName;
dockerVol.hostPath = hostPath;
dockerMount.name = jobs.dockerSocketMountName;
dockerMount.mountPath = jobs.dockerSocketMountPath;
this.runner.spec.volumes.push(dockerVol);
for (let i = 0; i < this.runner.spec.containers.length; i++) {
this.runner.spec.containers[i].volumeMounts.push(dockerMount);
}
}
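// Example of a job opting in to the host Docker socket (illustrative; the
// project must also set allowHostMounts for the branch above to run):
const publishJob = new Job("publish", "docker:stable");
publishJob.docker.enabled = true; // mounts the host socket into every container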
// If the job defines volumes, add them to the pod's volume list.
// If the volume type is `hostPath`, first check if the project allows host mounts
// and throw an error if it does not.
for (let v of job.volumes) {
if (v.hostPath != undefined && !project.allowHostMounts) {
throw new Error(`allowHostMounts is false in this project, not mounting ${v.hostPath.path}`);
}
this.runner.spec.volumes.push(v);
}
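// Sketch of the guard above from the job author's side; the path is
// illustrative. A hostPath volume is only honored when the project enables
// host mounts, otherwise run() rejects with the error above.
const cachedJob = new Job("with-cache", "alpine:3.14");
cachedJob.volumes = [{
name: "cache",
hostPath: { path: "/var/cache/shared" }
} as kubernetes.V1Volume];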
import * as groupImpl from "@brigadecore/brigadier/out/group";
import * as eventsImpl from "@brigadecore/brigadier/out/events";
import { JobRunner } from "./k8s";
// These are populated by fire() when an event is triggered.
let currentEvent = null;
let currentProject = null;
/**
* events is the main event registry.
*
* New event handlers can be registered using `events.on(name, (e: BrigadeEvent, p: Project) => {...})`,
* where `name` is the event name and the callback is the function to be
* executed when the event is triggered.
*/
export let events = new eventsImpl.EventRegistry();
/**
* fire triggers an event.
*
* The fire() function takes a BrigadeEvent (the event to be triggered) and a
* Project (the owner project). If an event handler is found, it is executed.
* If no event handler is found, nothing happens.
*/
export function fire(e: eventsImpl.BrigadeEvent, p: eventsImpl.Project) {
currentEvent = e;
currentProject = p;
events.fire(e, p);
}
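// Typical registration against the registry exported above. fire() is normally
// invoked by the worker itself, with the event and project decoded from the
// environment; the handler and job below are illustrative.
events.on("push", (e: eventsImpl.BrigadeEvent, p: eventsImpl.Project) => {
const ci = new Job("ci", "golang:1.17", ["make test"]);
ci.run();
});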