Extract ClientManager from server.js

Make client manager more robust when piping connections.
This commit is contained in:
Roman Shtylman
2017-12-11 21:16:55 -08:00
parent b4a36c78b0
commit 5fb9dbaaa1
8 changed files with 313 additions and 266 deletions

204
lib/ClientManager.js Normal file
View File

@@ -0,0 +1,204 @@
import Proxy from './Proxy';
// TODO(review): possibly removable — verify on-finished is still needed once response cleanup is handled via pump
import on_finished from 'on-finished';
import http from 'http';
import pump from 'pump';
import rand_id from './rand_id';
import BindingAgent from './BindingAgent';
// shared no-op; also used as a sentinel to mark a response as already handled
const NoOp = function () {};
// Manage sets of clients
//
// A client is a "user session" established to service a remote localtunnel client
class ClientManager {
    constructor(opt) {
        this.opt = opt;
        this.reqId = 0;

        // id -> client instance
        this.clients = Object.create(null);

        // statistics
        // NOTE: plain data property — consumers read `manager.stats.tunnels`.
        // Do not add a `stats()` method: this assignment would shadow it and
        // calling it would throw at runtime.
        this.stats = {
            tunnels: 0
        };
    }

    // BUGFIX: the former `stats()` prototype method was unreachable — the
    // `this.stats` object assigned in the constructor shadows a prototype
    // member of the same name, so `manager.stats()` threw
    // "stats is not a function". The method has been removed; read the
    // `stats` property directly instead.

    // create a new tunnel with `id`
    // if the id is already in use, a random id is assigned instead
    // resolves with the proxy's listen info (augmented with the final `id`)
    // rejects if the underlying tcp server fails to start
    async newClient(id) {
        const clients = this.clients;
        const stats = this.stats;

        // can't ask for an id already in use
        if (clients[id]) {
            id = rand_id();
        }

        const popt = {
            id: id,
            max_tcp_sockets: this.opt.max_tcp_sockets
        };

        const client = Proxy(popt);

        // add to clients map immediately
        // avoiding races with other clients requesting same id
        clients[id] = client;

        client.on('end', () => {
            --stats.tunnels;
            delete clients[id];
        });

        return new Promise((resolve, reject) => {
            // each local client has a tcp server to link with the remote localtunnel client
            // this starts the server and waits until it is listening
            client.start((err, info) => {
                if (err) {
                    // clear the reserved client id
                    delete clients[id];
                    reject(err);
                    return;
                }

                ++stats.tunnels;
                info.id = id;
                resolve(info);
            });
        });
    }

    // truthy when a tunnel with `id` exists
    // NOTE: returns the client instance itself (not a boolean) for backward
    // compatibility with callers that rely on the truthy value
    hasClient(id) {
        return this.clients[id];
    }

    // handle an http request destined for the tunnel identified by `clientId`
    // silently drops the request when no such tunnel exists
    handleRequest(clientId, req, res) {
        const client = this.clients[clientId];
        if (!client) {
            return;
        }

        // monotonically increasing request counter (kept for diagnostics;
        // the previous unused local copy of the value was removed)
        this.reqId += 1;

        // `endRes` finishes our response exactly once; after the response has
        // ended (by us or by the peer) it degrades to the shared no-op
        let endRes = () => {
            endRes = NoOp;
            res.end();
        };

        on_finished(res, () => {
            endRes = NoOp;
        });

        client.nextSocket((clientSocket) => {
            // response ended before we even got a socket to respond on
            if (endRes === NoOp) {
                return;
            }

            // happens when client upstream is disconnected (or disconnects)
            // and the proxy iterates the waiting list and clears the callbacks
            // we gracefully inform the user and kill their conn
            // without this, the browser will leave some connections open
            // and try to use them again for new requests
            // TODO(roman) we could instead have a timeout above
            // if no socket becomes available within some time,
            // we just tell the user no resource available to service request
            if (!clientSocket) {
                endRes();
                return;
            }

            // bind the outgoing request to the tunnel socket we just obtained
            const agent = new BindingAgent({
                socket: clientSocket,
            });

            const opt = {
                path: req.url,
                agent: agent,
                method: req.method,
                headers: req.headers
            };

            return new Promise((resolve) => {
                // TODO(review): errors from http.request should surface through
                // the pump callbacks below — confirm no error path escapes them
                const clientReq = http.request(opt, (clientRes) => {
                    // write response code and headers
                    res.writeHead(clientRes.statusCode, clientRes.headers);

                    // when this pump is done, we end our response
                    pump(clientRes, res, (err) => {
                        endRes();
                        resolve();
                    });
                });

                // we don't care about when this ends, only if there is error
                pump(req, clientReq, (err) => {
                    if (err) {
                        endRes();
                        resolve();
                    }
                });
            });
        });
    }

    // handle an http upgrade (e.g. websocket) for the tunnel identified by
    // `clientId`; silently drops the upgrade when no such tunnel exists
    handleUpgrade(clientId, req, sock) {
        const client = this.clients[clientId];
        if (!client) {
            return;
        }

        client.nextSocket(async (clientSocket) => {
            // the user connection went away while we waited for a tunnel socket
            if (!sock.readable || !sock.writable) {
                sock.end();
                return;
            }

            // happens when client upstream is disconnected (or disconnects)
            // and the proxy iterates the waiting list and clears the callbacks
            // we gracefully inform the user and kill their conn
            // without this, the browser will leave some connections open
            // and try to use them again for new requests
            // TODO(roman) we could instead have a timeout above
            // if no socket becomes available within some time,
            // we just tell the user no resource available to service request
            if (!clientSocket) {
                sock.end();
                return;
            }

            // websocket requests are special in that we simply re-create the header info
            // then directly pipe the socket data
            // avoids having to rebuild the request and handle upgrades via the http client
            const arr = [`${req.method} ${req.url} HTTP/${req.httpVersion}`];
            for (let i = 0; i < (req.rawHeaders.length - 1); i += 2) {
                arr.push(`${req.rawHeaders[i]}: ${req.rawHeaders[i + 1]}`);
            }

            arr.push('');
            arr.push('');

            clientSocket.pipe(sock).pipe(clientSocket);
            clientSocket.write(arr.join('\r\n'));

            // keep the tunnel socket checked out until the user connection ends
            await new Promise((resolve) => {
                sock.once('end', resolve);
            });
        });
    }
}
export default ClientManager;

View File

@@ -16,6 +16,8 @@ const Proxy = function(opt) {
self.waiting = [];
self.id = opt.id;
self.activeSockets = 0;
// default max is 10
self.max_tcp_sockets = opt.max_tcp_sockets || 10;
@@ -72,6 +74,8 @@ Proxy.prototype._maybe_destroy = function() {
const self = this;
clearTimeout(self.conn_timeout);
// After last socket is gone, we give opportunity to connect again quickly
self.conn_timeout = setTimeout(function() {
// sometimes the server is already closed but the event has not fired?
try {
@@ -81,7 +85,7 @@ Proxy.prototype._maybe_destroy = function() {
catch (err) {
self._cleanup();
}
}, 5000);
}, 1000);
}
// new socket connection from client for tunneling requests to client
@@ -89,16 +93,19 @@ Proxy.prototype._handle_socket = function(socket) {
const self = this;
// no more socket connections allowed
if (self.sockets.length >= self.max_tcp_sockets) {
if (self.activeSockets >= self.max_tcp_sockets) {
return socket.end();
}
self.activeSockets = self.activeSockets + 1;
self.debug('new connection from: %s:%s', socket.address().address, socket.address().port);
// a single connection is enough to keep client id slot open
clearTimeout(self.conn_timeout);
socket.once('close', function(had_error) {
self.activeSockets = self.activeSockets - 1;
self.debug('closed socket (error: %s)', had_error);
// what if socket was servicing a request at this time?
@@ -133,10 +140,10 @@ Proxy.prototype._handle_socket = function(socket) {
Proxy.prototype._process_waiting = function() {
const self = this;
const wait_cb = self.waiting.shift();
if (wait_cb) {
const fn = self.waiting.shift();
if (fn) {
self.debug('handling queued request');
self.next_socket(wait_cb);
self.nextSocket(fn);
}
};
@@ -152,48 +159,31 @@ Proxy.prototype._cleanup = function() {
self.emit('end');
};
Proxy.prototype.next_socket = function(handler) {
Proxy.prototype.nextSocket = async function(fn) {
const self = this;
// socket is a tcp connection back to the user hosting the site
const sock = self.sockets.shift();
if (!sock) {
self.debug('no more client, queue callback');
self.waiting.push(handler);
self.debug('no more clients, queue callback');
self.waiting.push(fn);
return;
}
self.debug('processing request');
handler(sock)
.then(() => {
if (!sock.destroyed) {
self.debug('retuning socket');
self.sockets.push(sock);
}
await fn(sock);
// no sockets left to process waiting requests
if (self.sockets.length === 0) {
return;
}
if (!sock.destroyed) {
self.debug('retuning socket');
self.sockets.push(sock);
}
self._process_waiting();
})
.catch((err) => {
log.error(err);
// no sockets left to process waiting requests
if (self.sockets.length === 0) {
return;
}
if (!sock.destroyed) {
self.debug('retuning socket');
self.sockets.push(sock);
}
// no sockets left to process waiting requests
if (self.sockets.length === 0) {
return;
}
self._process_waiting();
})
self._process_waiting();
};
Proxy.prototype._done = function() {