decaffeinate: Convert CommandRunner.coffee and 25 other files to JS

Authored by decaffeinate on 2020-02-19 12:14:14 +01:00, committed by mserranom
parent 37794788ce
commit 4655768fd2
26 changed files with 2801 additions and 1964 deletions
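
The hunk below interleaves the removed CoffeeScript with the JavaScript that decaffeinate generated in its place. As a quick orientation, the translation pattern looks like this (the before/after pair is taken verbatim from the usingSiblingContainers helper further down; the __guard__ helper it calls is appended at the bottom of the converted file):

// before (CoffeeScript, removed):
//   usingSiblingContainers = () ->
//     Settings?.path?.sandboxedCompilesHostDir?
// after (JavaScript, added by decaffeinate):
const usingSiblingContainers = () => __guard__(Settings != null ? Settings.path : undefined, x => x.sandboxedCompilesHostDir) != null;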

@@ -1,358 +1,475 @@
Settings = require "settings-sharelatex"
logger = require "logger-sharelatex"
Docker = require("dockerode")
dockerode = new Docker()
crypto = require "crypto"
async = require "async"
LockManager = require "./DockerLockManager"
fs = require "fs"
Path = require 'path'
_ = require "underscore"
/*
* decaffeinate suggestions:
* DS101: Remove unnecessary use of Array.from
* DS102: Remove unnecessary code created because of implicit returns
* DS103: Rewrite code to no longer use __guard__
* DS205: Consider reworking code to avoid use of IIFEs
* DS207: Consider shorter variations of null checks
* Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
*/
let DockerRunner, oneHour;
const Settings = require("settings-sharelatex");
const logger = require("logger-sharelatex");
const Docker = require("dockerode");
const dockerode = new Docker();
const crypto = require("crypto");
const async = require("async");
const LockManager = require("./DockerLockManager");
const fs = require("fs");
const Path = require('path');
const _ = require("underscore");
logger.info "using docker runner"
logger.info("using docker runner");
usingSiblingContainers = () ->
Settings?.path?.sandboxedCompilesHostDir?
const usingSiblingContainers = () => __guard__(Settings != null ? Settings.path : undefined, x => x.sandboxedCompilesHostDir) != null;
module.exports = DockerRunner =
ERR_NOT_DIRECTORY: new Error("not a directory")
ERR_TERMINATED: new Error("terminated")
ERR_EXITED: new Error("exited")
ERR_TIMED_OUT: new Error("container timed out")
module.exports = (DockerRunner = {
ERR_NOT_DIRECTORY: new Error("not a directory"),
ERR_TERMINATED: new Error("terminated"),
ERR_EXITED: new Error("exited"),
ERR_TIMED_OUT: new Error("container timed out"),
run: (project_id, command, directory, image, timeout, environment, callback = (error, output) ->) ->
run(project_id, command, directory, image, timeout, environment, callback) {
if usingSiblingContainers()
_newPath = Settings.path.sandboxedCompilesHostDir
logger.log {path: _newPath}, "altering bind path for sibling containers"
# Server Pro, example:
# '/var/lib/sharelatex/data/compiles/<project-id>'
# ... becomes ...
# '/opt/sharelatex_data/data/compiles/<project-id>'
directory = Path.join(Settings.path.sandboxedCompilesHostDir, Path.basename(directory))
let name;
if (callback == null) { callback = function(error, output) {}; }
if (usingSiblingContainers()) {
const _newPath = Settings.path.sandboxedCompilesHostDir;
logger.log({path: _newPath}, "altering bind path for sibling containers");
// Server Pro, example:
// '/var/lib/sharelatex/data/compiles/<project-id>'
// ... becomes ...
// '/opt/sharelatex_data/data/compiles/<project-id>'
directory = Path.join(Settings.path.sandboxedCompilesHostDir, Path.basename(directory));
}
volumes = {}
volumes[directory] = "/compile"
const volumes = {};
volumes[directory] = "/compile";
command = (arg.toString().replace?('$COMPILE_DIR', "/compile") for arg in command)
if !image?
image = Settings.clsi.docker.image
command = (Array.from(command).map((arg) => __guardMethod__(arg.toString(), 'replace', o => o.replace('$COMPILE_DIR', "/compile"))));
if ((image == null)) {
({ image } = Settings.clsi.docker);
}
if Settings.texliveImageNameOveride?
img = image.split("/")
image = "#{Settings.texliveImageNameOveride}/#{img[2]}"
if (Settings.texliveImageNameOveride != null) {
const img = image.split("/");
image = `${Settings.texliveImageNameOveride}/${img[2]}`;
}
options = DockerRunner._getContainerOptions(command, image, volumes, timeout, environment)
fingerprint = DockerRunner._fingerprintContainer(options)
options.name = name = "project-#{project_id}-#{fingerprint}"
const options = DockerRunner._getContainerOptions(command, image, volumes, timeout, environment);
const fingerprint = DockerRunner._fingerprintContainer(options);
options.name = (name = `project-${project_id}-${fingerprint}`);
# logOptions = _.clone(options)
# logOptions?.HostConfig?.SecurityOpt = "secomp used, removed in logging"
logger.log project_id: project_id, "running docker container"
DockerRunner._runAndWaitForContainer options, volumes, timeout, (error, output) ->
if error?.message?.match("HTTP code is 500")
logger.log err: error, project_id: project_id, "error running container so destroying and retrying"
DockerRunner.destroyContainer name, null, true, (error) ->
return callback(error) if error?
DockerRunner._runAndWaitForContainer options, volumes, timeout, callback
else
callback(error, output)
// logOptions = _.clone(options)
// logOptions?.HostConfig?.SecurityOpt = "secomp used, removed in logging"
logger.log({project_id}, "running docker container");
DockerRunner._runAndWaitForContainer(options, volumes, timeout, function(error, output) {
if (__guard__(error != null ? error.message : undefined, x => x.match("HTTP code is 500"))) {
logger.log({err: error, project_id}, "error running container so destroying and retrying");
return DockerRunner.destroyContainer(name, null, true, function(error) {
if (error != null) { return callback(error); }
return DockerRunner._runAndWaitForContainer(options, volumes, timeout, callback);
});
} else {
return callback(error, output);
}
});
return name # pass back the container name to allow it to be killed
return name;
}, // pass back the container name to allow it to be killed
kill: (container_id, callback = (error) ->) ->
logger.log container_id: container_id, "sending kill signal to container"
container = dockerode.getContainer(container_id)
container.kill (error) ->
if error? and error?.message?.match?(/Cannot kill container .* is not running/)
logger.warn err: error, container_id: container_id, "container not running, continuing"
error = null
if error?
logger.error err: error, container_id: container_id, "error killing container"
return callback(error)
else
callback()
kill(container_id, callback) {
if (callback == null) { callback = function(error) {}; }
logger.log({container_id}, "sending kill signal to container");
const container = dockerode.getContainer(container_id);
return container.kill(function(error) {
if ((error != null) && __guardMethod__(error != null ? error.message : undefined, 'match', o => o.match(/Cannot kill container .* is not running/))) {
logger.warn({err: error, container_id}, "container not running, continuing");
error = null;
}
if (error != null) {
logger.error({err: error, container_id}, "error killing container");
return callback(error);
} else {
return callback();
}
});
},
_runAndWaitForContainer: (options, volumes, timeout, _callback = (error, output) ->) ->
callback = (args...) ->
_callback(args...)
# Only call the callback once
_callback = () ->
_runAndWaitForContainer(options, volumes, timeout, _callback) {
if (_callback == null) { _callback = function(error, output) {}; }
const callback = function(...args) {
_callback(...Array.from(args || []));
// Only call the callback once
return _callback = function() {};
};
name = options.name
const { name } = options;
streamEnded = false
containerReturned = false
output = {}
let streamEnded = false;
let containerReturned = false;
let output = {};
callbackIfFinished = () ->
if streamEnded and containerReturned
callback(null, output)
const callbackIfFinished = function() {
if (streamEnded && containerReturned) {
return callback(null, output);
}
};
attachStreamHandler = (error, _output) ->
return callback(error) if error?
output = _output
streamEnded = true
callbackIfFinished()
const attachStreamHandler = function(error, _output) {
if (error != null) { return callback(error); }
output = _output;
streamEnded = true;
return callbackIfFinished();
};
DockerRunner.startContainer options, volumes, attachStreamHandler, (error, containerId) ->
return callback(error) if error?
return DockerRunner.startContainer(options, volumes, attachStreamHandler, function(error, containerId) {
if (error != null) { return callback(error); }
DockerRunner.waitForContainer name, timeout, (error, exitCode) ->
return callback(error) if error?
if exitCode is 137 # exit status from kill -9
err = DockerRunner.ERR_TERMINATED
err.terminated = true
return callback(err)
if exitCode is 1 # exit status from chktex
err = DockerRunner.ERR_EXITED
err.code = exitCode
return callback(err)
containerReturned = true
options?.HostConfig?.SecurityOpt = null #small log line
logger.log err:err, exitCode:exitCode, options:options, "docker container has exited"
callbackIfFinished()
_getContainerOptions: (command, image, volumes, timeout, environment) ->
timeoutInSeconds = timeout / 1000
dockerVolumes = {}
for hostVol, dockerVol of volumes
dockerVolumes[dockerVol] = {}
if volumes[hostVol].slice(-3).indexOf(":r") == -1
volumes[hostVol] = "#{dockerVol}:rw"
# merge settings and environment parameter
env = {}
for src in [Settings.clsi.docker.env, environment or {}]
env[key] = value for key, value of src
# set the path based on the image year
if m = image.match /:([0-9]+)\.[0-9]+/
year = m[1]
else
year = "2014"
env['PATH'] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/texlive/#{year}/bin/x86_64-linux/"
options =
"Cmd" : command,
"Image" : image
"Volumes" : dockerVolumes
"WorkingDir" : "/compile"
"NetworkDisabled" : true
"Memory" : 1024 * 1024 * 1024 * 1024 # 1 Gb
"User" : Settings.clsi.docker.user
"Env" : ("#{key}=#{value}" for key, value of env) # convert the environment hash to an array
"HostConfig" :
"Binds": ("#{hostVol}:#{dockerVol}" for hostVol, dockerVol of volumes)
"LogConfig": {"Type": "none", "Config": {}}
"Ulimits": [{'Name': 'cpu', 'Soft': timeoutInSeconds+5, 'Hard': timeoutInSeconds+10}]
"CapDrop": "ALL"
"SecurityOpt": ["no-new-privileges"]
if Settings.path?.synctexBinHostPath?
options["HostConfig"]["Binds"].push("#{Settings.path.synctexBinHostPath}:/opt/synctex:ro")
if Settings.clsi.docker.seccomp_profile?
options.HostConfig.SecurityOpt.push "seccomp=#{Settings.clsi.docker.seccomp_profile}"
return options
_fingerprintContainer: (containerOptions) ->
# Yay, Hashing!
json = JSON.stringify(containerOptions)
return crypto.createHash("md5").update(json).digest("hex")
startContainer: (options, volumes, attachStreamHandler, callback) ->
LockManager.runWithLock options.name, (releaseLock) ->
# Check that volumes exist before starting the container.
# When a container is started with volume pointing to a
# non-existent directory then docker creates the directory but
# with root ownership.
DockerRunner._checkVolumes options, volumes, (err) ->
return releaseLock(err) if err?
DockerRunner._startContainer options, volumes, attachStreamHandler, releaseLock
, callback
# Check that volumes exist and are directories
_checkVolumes: (options, volumes, callback = (error, containerName) ->) ->
if usingSiblingContainers()
# Server Pro, with sibling-containers active, skip checks
return callback(null)
checkVolume = (path, cb) ->
fs.stat path, (err, stats) ->
return cb(err) if err?
return cb(DockerRunner.ERR_NOT_DIRECTORY) if not stats?.isDirectory()
cb()
jobs = []
for vol of volumes
do (vol) ->
jobs.push (cb) -> checkVolume(vol, cb)
async.series jobs, callback
_startContainer: (options, volumes, attachStreamHandler, callback = ((error, output) ->)) ->
callback = _.once(callback)
name = options.name
logger.log {container_name: name}, "starting container"
container = dockerode.getContainer(name)
createAndStartContainer = ->
dockerode.createContainer options, (error, container) ->
return callback(error) if error?
startExistingContainer()
startExistingContainer = ->
DockerRunner.attachToContainer options.name, attachStreamHandler, (error)->
return callback(error) if error?
container.start (error) ->
if error? and error?.statusCode != 304 #already running
return callback(error)
else
callback()
container.inspect (error, stats)->
if error?.statusCode == 404
createAndStartContainer()
else if error?
logger.err {container_name: name, error:error}, "unable to inspect container to start"
return callback(error)
else
startExistingContainer()
attachToContainer: (containerId, attachStreamHandler, attachStartCallback) ->
container = dockerode.getContainer(containerId)
container.attach {stdout: 1, stderr: 1, stream: 1}, (error, stream) ->
if error?
logger.error err: error, container_id: containerId, "error attaching to container"
return attachStartCallback(error)
else
attachStartCallback()
logger.log container_id: containerId, "attached to container"
MAX_OUTPUT = 1024 * 1024 # limit output to 1MB
createStringOutputStream = (name) ->
return {
data: ""
overflowed: false
write: (data) ->
return if @overflowed
if @data.length < MAX_OUTPUT
@data += data
else
logger.error container_id: containerId, length: @data.length, maxLen: MAX_OUTPUT, "#{name} exceeds max size"
@data += "(...truncated at #{MAX_OUTPUT} chars...)"
@overflowed = true
# kill container if too much output
# docker.containers.kill(containerId, () ->)
return DockerRunner.waitForContainer(name, timeout, function(error, exitCode) {
let err;
if (error != null) { return callback(error); }
if (exitCode === 137) { // exit status from kill -9
err = DockerRunner.ERR_TERMINATED;
err.terminated = true;
return callback(err);
}
if (exitCode === 1) { // exit status from chktex
err = DockerRunner.ERR_EXITED;
err.code = exitCode;
return callback(err);
}
containerReturned = true;
__guard__(options != null ? options.HostConfig : undefined, x => x.SecurityOpt = null); //small log line
logger.log({err, exitCode, options}, "docker container has exited");
return callbackIfFinished();
});
});
},
stdout = createStringOutputStream "stdout"
stderr = createStringOutputStream "stderr"
_getContainerOptions(command, image, volumes, timeout, environment) {
let m, year;
let key, value, hostVol, dockerVol;
const timeoutInSeconds = timeout / 1000;
container.modem.demuxStream(stream, stdout, stderr)
const dockerVolumes = {};
for (hostVol in volumes) {
dockerVol = volumes[hostVol];
dockerVolumes[dockerVol] = {};
stream.on "error", (err) ->
logger.error err: err, container_id: containerId, "error reading from container stream"
if (volumes[hostVol].slice(-3).indexOf(":r") === -1) {
volumes[hostVol] = `${dockerVol}:rw`;
}
}
stream.on "end", () ->
attachStreamHandler null, {stdout: stdout.data, stderr: stderr.data}
// merge settings and environment parameter
const env = {};
for (let src of [Settings.clsi.docker.env, environment || {}]) {
for (key in src) { value = src[key]; env[key] = value; }
}
// set the path based on the image year
if ((m = image.match(/:([0-9]+)\.[0-9]+/))) {
year = m[1];
} else {
year = "2014";
}
env['PATH'] = `/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/texlive/${year}/bin/x86_64-linux/`;
const options = {
"Cmd" : command,
"Image" : image,
"Volumes" : dockerVolumes,
"WorkingDir" : "/compile",
"NetworkDisabled" : true,
"Memory" : 1024 * 1024 * 1024 * 1024, // 1 Gb
"User" : Settings.clsi.docker.user,
"Env" : (((() => {
const result = [];
for (key in env) {
value = env[key];
result.push(`${key}=${value}`);
}
return result;
})())), // convert the environment hash to an array
"HostConfig" : {
"Binds": (((() => {
const result1 = [];
for (hostVol in volumes) {
dockerVol = volumes[hostVol];
result1.push(`${hostVol}:${dockerVol}`);
}
return result1;
})())),
"LogConfig": {"Type": "none", "Config": {}},
"Ulimits": [{'Name': 'cpu', 'Soft': timeoutInSeconds+5, 'Hard': timeoutInSeconds+10}],
"CapDrop": "ALL",
"SecurityOpt": ["no-new-privileges"]
}
};
waitForContainer: (containerId, timeout, _callback = (error, exitCode) ->) ->
callback = (args...) ->
_callback(args...)
# Only call the callback once
_callback = () ->
container = dockerode.getContainer(containerId)
if ((Settings.path != null ? Settings.path.synctexBinHostPath : undefined) != null) {
options["HostConfig"]["Binds"].push(`${Settings.path.synctexBinHostPath}:/opt/synctex:ro`);
}
timedOut = false
timeoutId = setTimeout () ->
timedOut = true
logger.log container_id: containerId, "timeout reached, killing container"
container.kill(() ->)
, timeout
if (Settings.clsi.docker.seccomp_profile != null) {
options.HostConfig.SecurityOpt.push(`seccomp=${Settings.clsi.docker.seccomp_profile}`);
}
logger.log container_id: containerId, "waiting for docker container"
container.wait (error, res) ->
if error?
clearTimeout timeoutId
logger.error err: error, container_id: containerId, "error waiting for container"
return callback(error)
if timedOut
logger.log containerId: containerId, "docker container timed out"
error = DockerRunner.ERR_TIMED_OUT
error.timedout = true
callback error
else
clearTimeout timeoutId
logger.log container_id: containerId, exitCode: res.StatusCode, "docker container returned"
callback null, res.StatusCode
return options;
},
destroyContainer: (containerName, containerId, shouldForce, callback = (error) ->) ->
# We want the containerName for the lock and, ideally, the
# containerId to delete. There is a bug in the docker.io module
# where if you delete by name and there is an error, it throws an
# async exception, but if you delete by id it just does a normal
# error callback. We fall back to deleting by name if no id is
# supplied.
LockManager.runWithLock containerName, (releaseLock) ->
DockerRunner._destroyContainer containerId or containerName, shouldForce, releaseLock
, callback
_fingerprintContainer(containerOptions) {
// Yay, Hashing!
const json = JSON.stringify(containerOptions);
return crypto.createHash("md5").update(json).digest("hex");
},
_destroyContainer: (containerId, shouldForce, callback = (error) ->) ->
logger.log container_id: containerId, "destroying docker container"
container = dockerode.getContainer(containerId)
container.remove {force: shouldForce == true}, (error) ->
if error? and error?.statusCode == 404
logger.warn err: error, container_id: containerId, "container not found, continuing"
error = null
if error?
logger.error err: error, container_id: containerId, "error destroying container"
else
logger.log container_id: containerId, "destroyed container"
callback(error)
startContainer(options, volumes, attachStreamHandler, callback) {
return LockManager.runWithLock(options.name, releaseLock =>
// Check that volumes exist before starting the container.
// When a container is started with volume pointing to a
// non-existent directory then docker creates the directory but
// with root ownership.
DockerRunner._checkVolumes(options, volumes, function(err) {
if (err != null) { return releaseLock(err); }
return DockerRunner._startContainer(options, volumes, attachStreamHandler, releaseLock);
})
, callback);
},
# handle expiry of docker containers
// Check that volumes exist and are directories
_checkVolumes(options, volumes, callback) {
if (callback == null) { callback = function(error, containerName) {}; }
if (usingSiblingContainers()) {
// Server Pro, with sibling-containers active, skip checks
return callback(null);
}
MAX_CONTAINER_AGE: Settings.clsi.docker.maxContainerAge or oneHour = 60 * 60 * 1000
const checkVolume = (path, cb) =>
fs.stat(path, function(err, stats) {
if (err != null) { return cb(err); }
if (!(stats != null ? stats.isDirectory() : undefined)) { return cb(DockerRunner.ERR_NOT_DIRECTORY); }
return cb();
})
;
const jobs = [];
for (let vol in volumes) {
(vol => jobs.push(cb => checkVolume(vol, cb)))(vol);
}
return async.series(jobs, callback);
},
examineOldContainer: (container, callback = (error, name, id, ttl)->) ->
name = container.Name or container.Names?[0]
created = container.Created * 1000 # creation time is returned in seconds
now = Date.now()
age = now - created
maxAge = DockerRunner.MAX_CONTAINER_AGE
ttl = maxAge - age
logger.log {containerName: name, created: created, now: now, age: age, maxAge: maxAge, ttl: ttl}, "checking whether to destroy container"
callback(null, name, container.Id, ttl)
_startContainer(options, volumes, attachStreamHandler, callback) {
if (callback == null) { callback = function(error, output) {}; }
callback = _.once(callback);
const { name } = options;
destroyOldContainers: (callback = (error) ->) ->
dockerode.listContainers all: true, (error, containers) ->
return callback(error) if error?
jobs = []
for container in containers or []
do (container) ->
DockerRunner.examineOldContainer container, (err, name, id, ttl) ->
if name.slice(0, 9) == '/project-' && ttl <= 0
jobs.push (cb) ->
DockerRunner.destroyContainer name, id, false, () -> cb()
# Ignore errors because some containers get stuck but
# will be destroyed next time
async.series jobs, callback
logger.log({container_name: name}, "starting container");
const container = dockerode.getContainer(name);
startContainerMonitor: () ->
logger.log {maxAge: DockerRunner.MAX_CONTAINER_AGE}, "starting container expiry"
# randomise the start time
randomDelay = Math.floor(Math.random() * 5 * 60 * 1000)
setTimeout () ->
setInterval () ->
DockerRunner.destroyOldContainers()
, oneHour = 60 * 60 * 1000
, randomDelay
const createAndStartContainer = () =>
dockerode.createContainer(options, function(error, container) {
if (error != null) { return callback(error); }
return startExistingContainer();
})
;
DockerRunner.startContainerMonitor()
var startExistingContainer = () =>
DockerRunner.attachToContainer(options.name, attachStreamHandler, function(error){
if (error != null) { return callback(error); }
return container.start(function(error) {
if ((error != null) && ((error != null ? error.statusCode : undefined) !== 304)) { //already running
return callback(error);
} else {
return callback();
}
});
})
;
return container.inspect(function(error, stats){
if ((error != null ? error.statusCode : undefined) === 404) {
return createAndStartContainer();
} else if (error != null) {
logger.err({container_name: name, error}, "unable to inspect container to start");
return callback(error);
} else {
return startExistingContainer();
}
});
},
attachToContainer(containerId, attachStreamHandler, attachStartCallback) {
const container = dockerode.getContainer(containerId);
return container.attach({stdout: 1, stderr: 1, stream: 1}, function(error, stream) {
if (error != null) {
logger.error({err: error, container_id: containerId}, "error attaching to container");
return attachStartCallback(error);
} else {
attachStartCallback();
}
logger.log({container_id: containerId}, "attached to container");
const MAX_OUTPUT = 1024 * 1024; // limit output to 1MB
const createStringOutputStream = function(name) {
return {
data: "",
overflowed: false,
write(data) {
if (this.overflowed) { return; }
if (this.data.length < MAX_OUTPUT) {
return this.data += data;
} else {
logger.error({container_id: containerId, length: this.data.length, maxLen: MAX_OUTPUT}, `${name} exceeds max size`);
this.data += `(...truncated at ${MAX_OUTPUT} chars...)`;
return this.overflowed = true;
}
}
// kill container if too much output
// docker.containers.kill(containerId, () ->)
};
};
const stdout = createStringOutputStream("stdout");
const stderr = createStringOutputStream("stderr");
container.modem.demuxStream(stream, stdout, stderr);
stream.on("error", err => logger.error({err, container_id: containerId}, "error reading from container stream"));
return stream.on("end", () => attachStreamHandler(null, {stdout: stdout.data, stderr: stderr.data}));
});
},
waitForContainer(containerId, timeout, _callback) {
if (_callback == null) { _callback = function(error, exitCode) {}; }
const callback = function(...args) {
_callback(...Array.from(args || []));
// Only call the callback once
return _callback = function() {};
};
const container = dockerode.getContainer(containerId);
let timedOut = false;
const timeoutId = setTimeout(function() {
timedOut = true;
logger.log({container_id: containerId}, "timeout reached, killing container");
return container.kill(function() {});
}
, timeout);
logger.log({container_id: containerId}, "waiting for docker container");
return container.wait(function(error, res) {
if (error != null) {
clearTimeout(timeoutId);
logger.error({err: error, container_id: containerId}, "error waiting for container");
return callback(error);
}
if (timedOut) {
logger.log({containerId}, "docker container timed out");
error = DockerRunner.ERR_TIMED_OUT;
error.timedout = true;
return callback(error);
} else {
clearTimeout(timeoutId);
logger.log({container_id: containerId, exitCode: res.StatusCode}, "docker container returned");
return callback(null, res.StatusCode);
}
});
},
destroyContainer(containerName, containerId, shouldForce, callback) {
// We want the containerName for the lock and, ideally, the
// containerId to delete. There is a bug in the docker.io module
// where if you delete by name and there is an error, it throws an
// async exception, but if you delete by id it just does a normal
// error callback. We fall back to deleting by name if no id is
// supplied.
if (callback == null) { callback = function(error) {}; }
return LockManager.runWithLock(containerName, releaseLock => DockerRunner._destroyContainer(containerId || containerName, shouldForce, releaseLock)
, callback);
},
_destroyContainer(containerId, shouldForce, callback) {
if (callback == null) { callback = function(error) {}; }
logger.log({container_id: containerId}, "destroying docker container");
const container = dockerode.getContainer(containerId);
return container.remove({force: shouldForce === true}, function(error) {
if ((error != null) && ((error != null ? error.statusCode : undefined) === 404)) {
logger.warn({err: error, container_id: containerId}, "container not found, continuing");
error = null;
}
if (error != null) {
logger.error({err: error, container_id: containerId}, "error destroying container");
} else {
logger.log({container_id: containerId}, "destroyed container");
}
return callback(error);
});
},
// handle expiry of docker containers
MAX_CONTAINER_AGE: Settings.clsi.docker.maxContainerAge || (oneHour = 60 * 60 * 1000),
examineOldContainer(container, callback) {
if (callback == null) { callback = function(error, name, id, ttl){}; }
const name = container.Name || (container.Names != null ? container.Names[0] : undefined);
const created = container.Created * 1000; // creation time is returned in seconds
const now = Date.now();
const age = now - created;
const maxAge = DockerRunner.MAX_CONTAINER_AGE;
const ttl = maxAge - age;
logger.log({containerName: name, created, now, age, maxAge, ttl}, "checking whether to destroy container");
return callback(null, name, container.Id, ttl);
},
destroyOldContainers(callback) {
if (callback == null) { callback = function(error) {}; }
return dockerode.listContainers({all: true}, function(error, containers) {
if (error != null) { return callback(error); }
const jobs = [];
for (let container of Array.from(containers || [])) {
(container =>
DockerRunner.examineOldContainer(container, function(err, name, id, ttl) {
if ((name.slice(0, 9) === '/project-') && (ttl <= 0)) {
return jobs.push(cb => DockerRunner.destroyContainer(name, id, false, () => cb()));
}
})
)(container);
}
// Ignore errors because some containers get stuck but
// will be destroyed next time
return async.series(jobs, callback);
});
},
startContainerMonitor() {
logger.log({maxAge: DockerRunner.MAX_CONTAINER_AGE}, "starting container expiry");
// randomise the start time
const randomDelay = Math.floor(Math.random() * 5 * 60 * 1000);
return setTimeout(() =>
setInterval(() => DockerRunner.destroyOldContainers()
, (oneHour = 60 * 60 * 1000))
, randomDelay);
}
});
DockerRunner.startContainerMonitor();
function __guard__(value, transform) {
return (typeof value !== 'undefined' && value !== null) ? transform(value) : undefined;
}
function __guardMethod__(obj, methodName, transform) {
if (typeof obj !== 'undefined' && obj !== null && typeof obj[methodName] === 'function') {
return transform(obj, methodName);
} else {
return undefined;
}
}
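
The __guard__ and __guardMethod__ helpers appended above are what the DS103 and DS207 suggestions in the header comment refer to. A minimal sketch of the cleanup those suggestions point at, assuming a runtime with optional chaining and optional call support (a possible follow-up, not something this commit applies):

// sketch only: guarded expressions from this file rewritten with optional chaining
const usingSiblingContainers = () => Settings?.path?.sandboxedCompilesHostDir != null;

// the $COMPILE_DIR substitution in run(), without __guardMethod__
command = command.map(arg => arg.toString().replace?.('$COMPILE_DIR', '/compile'));

// the retry check in run(), without __guard__
if (error?.message?.match('HTTP code is 500')) { /* destroy container and retry */ }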