Name: js-handler/node_modules/restify/lib/plugins/throttle.js
// Copyright 2012 Mark Cavage <[email protected]> All rights reserved.

var sprintf = require('util').format;

var assert = require('assert-plus');
var LRU = require('lru-cache');

var errors = require('../errors');



///--- Globals

var TooManyRequestsError = errors.TooManyRequestsError;

var MESSAGE = 'You have exceeded your request rate of %s r/s.';



///--- Helpers

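// Returns true iff exactly one of its arguments is truthy, e.g.:
//   xor(true, false, false) => true
//   xor(true, true, false)  => false
//   xor(false, false)       => false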
function xor() {
    var x = false;
    for (var i = 0; i < arguments.length; i++) {
        if (arguments[i] && !x)
            x = true;
        else if (arguments[i] && x)
            return (false);
    }
    return (x);
}



///--- Internal Class (TokenBucket)

/**
 * An implementation of the Token Bucket algorithm.
 *
 * Basically, in network throttling, there are two "mainstream"
 * algorithms for throttling requests, Token Bucket and Leaky Bucket.
 * For restify, I went with Token Bucket. For a good description of the
 * algorithm, see: http://en.wikipedia.org/wiki/Token_bucket
 *
 * In the options object, you pass in the total tokens and the fill rate.
 * Practically speaking, this means "allow `fill rate` requests/second,
 * with bursts up to `total tokens`". Note that the bucket is initialized
 * to full.
 *
 * Also, in googling, I came across a concise python implementation, so this
 * is just a port of that. Thanks http://code.activestate.com/recipes/511490 !
 *
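 * For instance (illustrative values, not defaults of this module):
 *
 *     var bucket = new TokenBucket({capacity: 10, fillRate: 2});
 *     bucket.consume(1);   // true while tokens remain; refills at 2/sec
 *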
 * @param {Object} options contains the parameters:
 *                   - {Number} capacity the maximum burst.
 *                   - {Number} fillRate the rate to refill tokens.
 */
function TokenBucket(options) {
    assert.object(options, 'options');
    assert.number(options.capacity, 'options.capacity');
    assert.number(options.fillRate, 'options.fillRate');

    this.tokens = this.capacity = options.capacity;
    this.fillRate = options.fillRate;
    this.time = Date.now();
}


/**
 * Consume N tokens from the bucket.
 *
 * If there is not enough capacity, no tokens are pulled from the bucket.
 *
 * @param {Number} tokens the number of tokens to pull out.
 * @return {Boolean} true if there was sufficient capacity, false otherwise.
 */
TokenBucket.prototype.consume = function consume(tokens) {
    if (tokens <= this._fill()) {
        this.tokens -= tokens;
        return (true);
    }

    return (false);
};


/**
 * Fills the bucket with more tokens.
 *
 * Rather than do some whacky setTimeout() deal, we just approximate refilling
 * the bucket by tracking elapsed time from the last time we touched the bucket.
 *
 * Simply, we set the bucket size to min(totalTokens,
 * current + (fillRate * elapsed time)).
 *
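 * For example (illustrative numbers): with fillRate 2 and 500ms elapsed
 * since the last touch, delta = 2 * (500 / 1000) = 1 token is added,
 * capped at the bucket's capacity.
 *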
 * @return {Number} the current number of tokens in the bucket.
 */
TokenBucket.prototype._fill = function _fill() {
    var now = Date.now();
    if (now < this.time) // reset to account for clock drift (like DST)
        this.time = now - 1000;

    if (this.tokens < this.capacity) {
        var delta = this.fillRate * ((now - this.time) / 1000);
        this.tokens = Math.min(this.capacity, this.tokens + delta);
    }
    this.time = now;

    return (this.tokens);
};



///--- Internal Class (TokenTable)
// Just a wrapper over LRU that supports put/get to store token -> bucket
// mappings

function TokenTable(options) {
    assert.object(options, 'options');

    this.table = new LRU(options.size || 10000);
}


TokenTable.prototype.put = function put(key, value) {
    this.table.set(key, value);
};


TokenTable.prototype.get = function get(key) {
    return (this.table.get(key));
};
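
// Note: a custom `tokensTable` passed in via options only needs to implement
// the same put/get contract. A minimal sketch (not part of this module):
//
//   function PlainTable() { this.map = {}; }
//   PlainTable.prototype.put = function (key, value) { this.map[key] = value; };
//   PlainTable.prototype.get = function (key) { return (this.map[key]); };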



///--- Exported API

/**
 * Creates an API rate limiter that can be plugged into the standard
 * restify request handling pipeline.
 *
 * This throttle gives you three options on which to throttle:
 * username, IP address and 'X-Forwarded-For'. IP/XFF is a /32 match,
 * so keep that in mind if using it. Username takes the user specified
 * on req.username (which gets automagically set for supported Authorization
 * types; otherwise set it yourself with a filter that runs before this).
 *
 * In all cases, you can set a `burst` and a `rate` (in requests/second),
 * as an integer or float. Those really translate to the `TokenBucket`
 * algorithm, so read up on that (or see the comments above...).
 *
 * In either case, the top level options burst/rate set a blanket throttling
 * rate, and then you can pass in an `overrides` object with rates for
 * specific users/IPs. You should use overrides sparingly, as we make a new
 * TokenBucket to track each.
 *
 * On the `options` object, ip, username and xff are treated as an XOR:
 * exactly one of them must be set.
 *
 * An example options object with overrides:
 *
 *     {
 *       burst: 10,  // Max 10 requests in a burst (if tokens are available)
 *       rate: 0.5,  // Steady state: 1 request / 2 seconds
 *       ip: true,   // throttle per IP
 *       overrides: {
 *         '192.168.1.1': {
 *           burst: 0,
 *           rate: 0   // unlimited
 *         }
 *       }
 *     }
 *
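 * A minimal usage sketch (assuming the plugin is exposed as restify.throttle
 * and `server` is a restify server; adjust to your setup):
 *
 *     server.use(restify.throttle({
 *       burst: 100,
 *       rate: 50,
 *       ip: true,
 *       overrides: {
 *         '192.168.1.1': {
 *           rate: 0,   // unlimited
 *           burst: 0
 *         }
 *       }
 *     }));
 *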
 * @param {Object} options required options with:
 *                   - {Number} burst (required).
 *                   - {Number} rate (required).
 *                   - {Boolean} ip (optional).
 *                   - {Boolean} username (optional).
 *                   - {Boolean} xff (optional).
 *                   - {Object} overrides (optional).
 *                   - {Object} tokensTable: a storage engine this plugin will
 *                              use to store throttling keys -> bucket mappings.
 *                              If you don't specify this, the default is to
 *                              use an in-memory O(1) LRU, with 10k distinct
 *                              keys. Any implementation just needs to support
 *                              put/get.
 *                   - {Number} maxKeys: If using the default implementation,
 *                              you can specify how large you want the table to
 *                              be. Default is 10000.
 * @return {Function} of type f(req, res, next) to be plugged into a route.
 * @throws {TypeError} on bad input.
 */
function throttle(options) {
    assert.object(options, 'options');
    assert.number(options.burst, 'options.burst');
    assert.number(options.rate, 'options.rate');
    if (!xor(options.ip, options.xff, options.username))
        throw new Error('(ip ^ username ^ xff)');

    var table = options.tokensTable ||
        new TokenTable({size: options.maxKeys});

    function rateLimit(req, res, next) {
        // Read the configured defaults on every request so that an override
        // applied for one client does not leak into later requests.
        var burst = options.burst;
        var rate = options.rate;

        var attr;
        if (options.ip) {
            attr = req.connection.remoteAddress;
        } else if (options.xff) {
            attr = req.headers['x-forwarded-for'];
        } else if (options.username) {
            attr = req.username;
        } else {
            req.log.warn({config: options},
                'Invalid throttle configuration');
            return (next());
        }

        // Before bothering with overrides, see if this request
        // even matches
        if (!attr)
            return (next());

        // Check the overrides
        if (options.overrides &&
            options.overrides[attr] &&
            options.overrides[attr].burst !== undefined &&
            options.overrides[attr].rate !== undefined) {

            burst = options.overrides[attr].burst;
            rate = options.overrides[attr].rate;
        }

        if (!rate || !burst)
            return (next());

        var bucket = table.get(attr);
        if (!bucket) {
            bucket = new TokenBucket({
                capacity: burst,
                fillRate: rate
            });
            table.put(attr, bucket);
        }

        req.log.trace('Throttle(%s): num_tokens= %d',
            attr, bucket.tokens);

        if (!bucket.consume(1)) {
            req.log.info({
                address: req.connection.remoteAddress || '?',
                method: req.method,
                url: req.url,
                user: req.username || '?'
            }, 'Throttling');

            // Until https://github.com/joyent/node/pull/2371 is in
            var msg = sprintf(MESSAGE, rate);
            return (next(new TooManyRequestsError(msg)));
        }

        return (next());
    }

    return (rateLimit);
}

module.exports = throttle;
