How to use socketcluster - 10 common examples

To help you get started, we’ve selected a few socketcluster examples based on popular ways it is used in public projects.

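Most of the excerpts below follow the same split: a master script instantiates SocketCluster with an options object and points workerController at a separate module (usually worker.js) that handles the actual socket traffic. That worker side is not shown in the master-process excerpts, so here is a minimal sketch of the classic pre-v15 pattern they refer to; the event name 'customEvent' is a placeholder and is not taken from any of these projects.

// worker.js - minimal workerController sketch (classic pre-v15 SocketCluster API)
module.exports.run = function (worker) {
  // scServer is the realtime server bound to this worker process.
  var scServer = worker.scServer;

  scServer.on('connection', function (socket) {
    // 'customEvent' is an illustrative event name only.
    socket.on('customEvent', function (data, respond) {
      // Echo the payload back to the client as an acknowledgement.
      respond(null, {received: data});
    });
  });
};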

From jondubois/iogrid (server.js) on GitHub:
var start = function () {
  var socketCluster = new SocketCluster(options);

  if (masterControllerPath) {
    var masterController = require(masterControllerPath);
    masterController.run(socketCluster);
  }

  if (environment == 'dev') {
    // This will cause SC workers to reboot when code changes anywhere in the app directory.
    // The second options argument here is passed directly to chokidar.
    // See https://github.com/paulmillr/chokidar#api for details.
    console.log(`   !! The sc-hot-reboot plugin is watching for code changes in the ${__dirname} directory`);
    scHotReboot.attach(socketCluster, {
      cwd: __dirname,
      ignored: ['public', 'node_modules', 'README.md', 'Dockerfile', 'server.js', 'broker.js', /[\/\\]\./]
    });
  }
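The excerpt above optionally loads a master controller module and calls its run method with the SocketCluster instance, but that module is not shown. A minimal sketch of what such a module could look like; the body is illustrative only, not iogrid's actual code.

// master.js - hypothetical master controller loaded via masterControllerPath above
module.exports.run = function (socketCluster) {
  // Runs in the master process with the SocketCluster instance created in server.js.
  // Master-level event listeners and bootstrapping logic would typically go here.
  console.log('Master controller attached');
};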

From epochtalk/epochtalk (websocket-server/index.js) on GitHub:
return new Promise(function(resolve) {
        var socketCluster = new SocketCluster({
          authKey: config.authKey,
          workers: config.workers,
          brokers: config.brokers,
          wsEngine: config.wsEngine,
          protocol: config.protocol,
          protocolOptions: config.protocolOptions,
          port: config.port,
          host: config.host,
          appName: 'ept-ws',
          workerController: path.normalize(__dirname + '/worker.js'),
          allowClientPublish: false
        });
        return resolve(socketCluster);
      })
      // cleanup (process exit, flushes db)
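The trailing comment refers to cleanup logic that this excerpt truncates. A minimal sketch of what such a shutdown hook could look like; flushDb is a placeholder, not epochtalk's actual API.

// Hypothetical shutdown hook; flushDb stands in for the real database flush.
function flushDb() { return Promise.resolve(); } // placeholder

process.on('SIGINT', function () {
  flushDb().then(function () { process.exit(0); });
});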

From mattkrick/meatier (src/server/server.js) on GitHub:
export const options = {
  authKey: process.env.JWT_SECRET,
  logLevel: 1,
  // change this to scale vertically
  workers: 1 || numCpus,
  brokers: 1,
  port: process.env.PORT || 3000,
  appName: 'Meatier',
  allowClientPublish: false,
  initController: path.join(__dirname, '/init.js'),
  workerController: path.join(__dirname, '/worker.js'),
  brokerController: path.join(__dirname, '/broker.js'),
  socketChannelLimit: 1000,
  rebootWorkerOnCrash: true
};
new SocketCluster(options); // eslint-disable-line no-new
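Note that workers: 1 || numCpus always evaluates to 1, because the left-hand operand is truthy; the comment above it means you must replace the literal 1 to scale across cores. A minimal sketch of that variant, assuming numCpus comes from Node's os module as it usually does:

import os from 'os';

const numCpus = os.cpus().length;
export const options = {
  // ...other options unchanged from the example above...
  workers: numCpus // one worker per CPU core instead of the hard-coded 1
};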

From SocketCluster/socketcluster (sample/server.js) on GitHub:
/*
  This is the SocketCluster master controller file.
  It is responsible for bootstrapping the SocketCluster master process.
  Be careful when modifying the options object below.
  If you plan to run SCC on Kubernetes or another orchestrator at some point
  in the future, avoid changing the environment variable names below as
  each one has a specific meaning within the SC ecosystem.
*/

var path = require('path');
var argv = require('minimist')(process.argv.slice(2));
var scHotReboot = require('sc-hot-reboot');

var fsUtil = require('socketcluster/fsutil');
var waitForFile = fsUtil.waitForFile;

var SocketCluster = require('socketcluster');

var workerControllerPath = argv.wc || process.env.SOCKETCLUSTER_WORKER_CONTROLLER;
var brokerControllerPath = argv.bc || process.env.SOCKETCLUSTER_BROKER_CONTROLLER;
var workerClusterControllerPath = argv.wcc || process.env.SOCKETCLUSTER_WORKERCLUSTER_CONTROLLER;
var environment = process.env.ENV || 'dev';

var options = {
  workers: Number(argv.w) || Number(process.env.SOCKETCLUSTER_WORKERS) || 1,
  brokers: Number(argv.b) || Number(process.env.SOCKETCLUSTER_BROKERS) || 1,
  port: Number(argv.p) || Number(process.env.SOCKETCLUSTER_PORT) || 8000,
  // You can switch to 'sc-uws' for improved performance.
  wsEngine: process.env.SOCKETCLUSTER_WS_ENGINE || 'ws',
  appName: argv.n || process.env.SOCKETCLUSTER_APP_NAME || null,
  workerController: workerControllerPath || path.join(__dirname, 'worker.js'),
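Each option above falls back from a CLI flag, to an environment variable, to a hard-coded default. Based on the flags parsed in this excerpt (w, b, p, wc), the script could presumably be launched in either of the following ways; the paths and numbers are illustrative:

node server.js -w 2 -b 1 -p 8000 --wc ./worker.js
SOCKETCLUSTER_WORKERS=2 SOCKETCLUSTER_PORT=8000 node server.js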

From SocketCluster/baasil-cli (boilerplates/scc/server.js) on GitHub:
/*
  This is the SocketCluster master controller file.
  It is responsible for bootstrapping the SocketCluster master process.
  Be careful when modifying the options object below.
  If you plan to run SCC on Kubernetes or another orchestrator at some point
  in the future, avoid changing the environment variable names below as
  each one has a specific meaning within the SC ecosystem.
*/

var path = require('path');
var argv = require('minimist')(process.argv.slice(2));
var scHotReboot = require('sc-hot-reboot');

var fsUtil = require('socketcluster/fsutil');
var waitForFile = fsUtil.waitForFile;

var SocketCluster = require('socketcluster');

var workerControllerPath = argv.wc || process.env.SOCKETCLUSTER_WORKER_CONTROLLER;
var brokerControllerPath = argv.bc || process.env.SOCKETCLUSTER_BROKER_CONTROLLER;
var workerClusterControllerPath = argv.wcc || process.env.SOCKETCLUSTER_WORKERCLUSTER_CONTROLLER;
var environment = process.env.ENV || 'dev';

var options = {
  workers: Number(argv.w) || Number(process.env.SOCKETCLUSTER_WORKERS) || 1,
  brokers: Number(argv.b) || Number(process.env.SOCKETCLUSTER_BROKERS) || 1,
  port: Number(argv.p) || Number(process.env.SOCKETCLUSTER_PORT) || 8000,
  // You can switch to 'sc-uws' for improved performance.
  wsEngine: process.env.SOCKETCLUSTER_WS_ENGINE || 'sc-uws',
  appName: argv.n || process.env.SOCKETCLUSTER_APP_NAME || null,
  workerController: workerControllerPath || './worker.js',

From ArkEcosystem/core (packages/core-p2p/src/socket-server/index.ts) on GitHub:
export const startSocketServer = async (service: P2P.IPeerService, config: Record<string, any>): Promise<SocketCluster> => {
    // when testing we also need to get socket files from dist folder
    const relativeSocketPath = process.env.CORE_ENV === "test" ? "/../../dist/socket-server" : "";

    const server: SocketCluster = new SocketCluster({
        ...{
            appName: "core-p2p",
            brokers: 1,
            environment: process.env.CORE_NETWORK_NAME === "testnet" ? "dev" : "prod",
            rebootWorkerOnCrash: true,
            workerController: __dirname + `${relativeSocketPath}/worker.js`,
            workers: 2,
            wsEngine: "ws",
            perMessageDeflate: true,
            // we set maxPayload value to 2MB as currently the largest data going through is a block
            // and (for now, TODO use milestone value ?) blocks are not larger than 2MB
            maxPayload: 2 * 1024 * 1024,
        },
        ...config.server,
    });
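Because config.server is spread after the inline defaults, any key it supplies wins. A hedged usage sketch, assuming it runs inside an async context; the override value is illustrative, not Ark's real configuration:

// Keys in config.server override the inline defaults shown above;
// keys that are omitted keep their default values.
const server = await startSocketServer(service, {
    server: { workers: 4 } // replaces the default workers: 2
});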

From LiskHQ/lisk-sdk-examples (framework/src/modules/chain/workers_controller.js) on GitHub:
const PeersUpdateRules = require('./api/ws/workers/peers_update_rules');
const Rules = require('./api/ws/workers/rules');
const failureCodes = require('./api/ws/rpc/failure_codes');
const { createLoggerComponent } = require('../../../src/components/logger');

const validator = require('../../controller/validator');
const schema = require('./defaults/config');

const config = validator.parseEnvArgAndValidate(schema, {});

/**
 * Instantiate the SocketCluster SCWorker instance with custom logic
 * inside the run function. The run function is invoked when the worker process
 * is ready to accept requests/connections.
 */
SCWorker.create({
	// Pass the custom configuration to P2P HTTP Server to mitigate the security vulnerabilities fixed by Node v8.14.0 - "Slowloris (cve-2018-12122)"
	createHTTPServer() {
		const httpServer = http.createServer();
		httpServer.headersTimeout = config.network.options.httpHeadersTimeout;
		httpServer.setTimeout(config.network.options.httpServerSetTimeout);
		httpServer.on('timeout', socket => {
			socket.destroy();
		});
		return httpServer;
	},
	run() {
		const self = this;
		const scServer = this.getSCServer();

		async.auto(
			{

From SocketCluster/socketcluster (sample/dockerwait.js) on GitHub:
/*
  This script waits for the master controller script to become available.
  With orchestrators like Kubernetes, the master controller file may be fed in through
  a volume container at runtime and so it is necessary to wait for it before launch.
*/

var fsUtil = require('socketcluster/fsutil');
var waitForFile = fsUtil.waitForFile;

var SOCKETCLUSTER_MASTER_DEFAULT_CONTROLLER = './server.js';
var masterControllerPath = process.env.SOCKETCLUSTER_MASTER_CONTROLLER || SOCKETCLUSTER_MASTER_DEFAULT_CONTROLLER;
var bootCheckInterval = Number(process.env.SOCKETCLUSTER_BOOT_CHECK_INTERVAL) || 200;
var bootTimeout = Number(process.env.SOCKETCLUSTER_CONTROLLER_BOOT_TIMEOUT) || 10000;
var bootStartTime = Date.now();

var errorMessage = `Failed to locate the master controller file at path ${masterControllerPath} before SOCKETCLUSTER_CONTROLLER_BOOT_TIMEOUT`;

waitForFile(masterControllerPath, bootCheckInterval, bootStartTime, bootTimeout, errorMessage)
.catch((err) => {
  console.error('> Boot error: ' + err.message);
  process.exit(1);
});
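Everything this script does is driven by the environment variables shown above, so in a container they are typically set at launch; for example, with an illustrative path and timeout:

SOCKETCLUSTER_MASTER_CONTROLLER=/app/server.js SOCKETCLUSTER_CONTROLLER_BOOT_TIMEOUT=20000 node dockerwait.js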

From zalmoxisus/remotedev-server (index.js) on GitHub:
if (logLevel >= LOG_LEVEL_ERROR) {
          console.error(err);
        }
        return;
      }
      if (port !== p) {
        if (logLevel >= LOG_LEVEL_WARN) {
          console.log('[RemoteDev] Server port ' + port + ' is already used.');
        }
        resolve({ portAlreadyUsed: true, on: function(status, cb) { cb(); } });
      } else {
        if (logLevel >= LOG_LEVEL_INFO) {
          console.log('[RemoteDev] Start server...');
          console.log(repeat('-', 80) + '\n');
        }
        resolve(new SocketCluster(options));
      }
    });
  });

From IdleLands/IdleLands3 (socketcluster/server.js) on GitHub:
var ip = _(os.networkInterfaces())
  .values()
  .flatten()
  .filter(val => val.family === 'IPv4' && val.internal === false)
  .map('address')
  .first();

if(ip) {
  console.log(`Your IP is: ${ip}`);
}

process.on('unhandledRejection', function(reason, p) {
  console.log('Unhandled Rejection at: Promise ', p, ' reason: ', reason);
});

var socketCluster = new SocketCluster({
  workers: Number(argv.w) || 1,
  stores: Number(argv.s) || 1,
  port: Number(argv.p) || process.env.PORT || 8080,
  appName: argv.n || 'reactive-retro',
  initController: __dirname + '/init.js',
  workerController: __dirname + '/worker.js',
  brokerController: __dirname + '/broker.js',
  socketChannelLimit: 100,
  rebootWorkerOnCrash: argv['auto-reboot'] != false,
  logLevel: process.env.NODE_ENV === 'production' ? 3 : 1
});

socketCluster.on('fail', function(e) {
  console.error(e);
});
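The same pattern works for other master lifecycle events. Assuming the classic pre-v15 master API this project is written against, warning and ready handlers could be attached the same way; this is an assumption about the installed version, not code from the project:

// Assumed pre-v15 master events; verify against the installed socketcluster version.
socketCluster.on('warning', function (warning) {
  console.warn(warning);
});

socketCluster.on('ready', function () {
  console.log('SocketCluster master is ready');
});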

About socketcluster: a highly scalable realtime framework with support for async/await. License: MIT.