diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 00495dc..0000000
--- a/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/nbproject
\ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..8ec9809
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,5 @@
+language: node_js
+node_js:
+ - '5'
+ - '4'
+ - '0.12'
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
index 1c59043..8bce9de 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,5 @@
Copyright (c) 2010 Aleksander Williams
+Copyright (c) 2016 Moos (github.com/moos/mini-bench) (fork)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 64e303b..0000000
--- a/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Run all tests
-#
-test:
- node test/core.js
-
-.PHONY: test
\ No newline at end of file
diff --git a/README.md b/README.md
index 1ea1581..f25b465 100644
--- a/README.md
+++ b/README.md
@@ -1,23 +1,32 @@
-uubench
+mini-bench
=======
+[![NPM version](https://img.shields.io/npm/v/mini-bench.svg)](https://www.npmjs.com/package/mini-bench)
+[![Build Status](https://travis-ci.org/moos/mini-bench.svg?branch=master)](https://travis-ci.org/moos/mini-bench)
+
> A tiny asynchronous JavaScript benchmarking library
-uubench provides a simple harness for measuring the execution time of JavaScript code. Design your experiments, analyze the numbers and present the data as you see fit.
+mini-bench provides a simple harness for measuring the execution time of JavaScript code. Design your experiments, analyze the numbers and present the data as you see fit.
Features:
-* small (~100 LOC)
+* small (< 200 LOC)
* asynchronous, evented operation
-* fixed or adaptive test cycles
-* no DOM-related cruft
+* **fixed** as well as _adaptive_ test cycles (like those used in [benchmark.js](https://github.com/bestiejs/benchmark.js))
+* easy to use
+
+**Note**: mini-bench is an up-to-date fork of [uubench](https://github.com/akdubya/uubench).
+
Synopsis
--------
-Set up a benchmark suite:
+Set up a suite:
- var suite = new uubench.Suite({
+```js
+ var Bench = require('mini-bench');
+
+ var suite = new Bench.Suite({
start: function() {
console.log("starting...");
},
@@ -26,11 +35,16 @@ Set up a benchmark suite:
},
done: function() {
console.log("finished");
+ },
+ section: function(name) {
+ console.log("Section: " + name);
}
});
+```
Add some benchmarks:
+```js
suite.bench("async", function(next) {
myAsyncFunc(function() {
next();
@@ -41,97 +55,114 @@ Add some benchmarks:
mySyncFunc();
next();
});
+```
-Go man go!
+Run the suite:
+```js
suite.run();
+```
Installation
------------
Via npm:
- $ npm install uubench
+ $ npm install mini-bench
In Node:
- var uubench = require('uubench');
+ var Bench = require('mini-bench');
-In the browser:
+In the browser (exposed as `MiniBench`):
-    <script src="uubench.js"></script>
+    <script src="mini-bench.js"></script>
Guide
-----
-By design, uubench doesn't come with extras. Instead, you use the low-level API to build your own unique benchmark suites.
+By design, mini-bench doesn't come with extras. Instead, you use the low-level API to build your own unique benchmark suites.
### Defaults
-uubench ships with the following defaults that apply to every test suite:
+mini-bench ships with the following defaults that apply to every test suite:
- uubench.defaults = {
+```js
+ Bench.defaults = {
type: "adaptive", // adaptive or fixed
iterations: 10, // starting iterations
- min: 100, // minimum run time (ms) - adaptive only
- delay: 100 // delay between tests (ms)
+ minTime: 100, // minimum run time (ms) - adaptive only
+ delay: 100, // delay between tests (ms)
+ async: true // run benches in async mode (all at once)
}
+```
You may override these globally or per-suite. Read on to find out what each option does.
### Fixed test cycles
-By default uubench uses adaptive test cycles to allow reasonable execution time across different environments. To use fixed cycles instead, set the `type` to "fixed":
+By default mini-bench uses adaptive test cycles to allow reasonable execution time across different environments. To use fixed cycles instead, set the `type` to "fixed":
- var suite = new uubench.Suite({
+```js
+ var suite = new Bench.Suite({
type: "fixed",
iterations: 1000, // run each benchmark exactly 1000 times
...
});
+```
### Setting the minimum runtime
-uubench defaults to a minimum runtime of 100ms in adaptive mode. To adjust this runtime:
+mini-bench defaults to a minimum run time of 100ms in adaptive mode. To adjust this run time:
- var suite = new uubench.Suite({
- min: 1000, // each benchmark should run for at least 1000ms
+```js
+ var suite = new Bench.Suite({
+ minTime: 1000, // each benchmark should run for at least 1000ms
...
});
+```
### Starting iterations
In adaptive mode it is sometimes useful to bump up the starting iterations to reach the minimum runtime faster:
- var suite = new uubench.Suite({
+```js
+ var suite = new Bench.Suite({
iterations: 1000, // run each benchmark a minimum of 1000 times
...
});
+```
### Setting the benchmark delay
-uubench imposes a 100ms delay between benchmarks to give any UI elements that might be present time to update. This delay can be tweaked:
+mini-bench imposes a 100ms delay between benchmarks to give any UI elements that might be present time to update. This delay can be tweaked:
- var suite = new uubench.Suite({
+```js
+ var suite = new Bench.Suite({
delay: 500, // 500ms delay between benchmarks
...
});
+```
### Disabling auto-looping
-To manually loop within a given benchmark, add a second argument to the benchmark's argument list. uubench will then automatically disable auto-looping:
+To manually loop within a given benchmark, add a second argument to the benchmark's argument list. mini-bench will then automatically disable auto-looping:
+```js
suite.bench("foo", function(next, count) {
while (count--) {
...
}
next();
});
+```
### Multiple runs
To collect benchmark data over multiple runs, simply rerun the suite on completion:
- var suite = new uubench.Suite({
+```js
+ var suite = new Bench.Suite({
...
done: function() {
if (--runCounter) {
@@ -141,30 +172,123 @@ To collect benchmark data over multiple runs, simply rerun the suite on completi
}
}
});
+```
Beware of relying on multiple in-process runs to establish statistical relevance. Better data can be obtained by completely re-running your test scripts.
+### Running in sync mode
+
+A suite may have multiple benchmarks. To run benchmarks one-at-a-time in the order they were added, set `async` option to false.
+
+```js
+ var suite = new Bench.Suite({
+ async: false, // run benches in sync mode (one at a time in order)
+ ...
+ });
+```
+
+
+### Section markers
+
+Longer suites that have multiple benches may use the `section()` method. A section is run in order (when the `async` option is false) and can be used to visually group benches and optionally modify parameters.
+
+```js
+ suite.section("foo section", function(next) {
+ suite.options.iterations = 1;
+ next();
+ })
+ .bench("foo1", function(next) {
+ ...
+ next();
+ })
+ .bench("foo2", function(next) {
+ ...
+ next();
+ });
+
+ suite.section("bar section", function(next) {
+ // change iterations going forward
+ suite.options.iterations = 10;
+ next();
+ })
+ .bench("bar", function(next) {
+ ...
+ next();
+ });
+```
+
+A section emits a "section" event.
+
+### Chaining
+
+As of v0.0.2 `bench()` and `section()` are _chainable_. This allows for easier grouping and enabling/disabling of groups.
+
+```js
+ suite.section('sec 1')
+ .bench()
+ .bench()
+ ...
+
+ suite.section('sec 2')
+ .bench()
+ .bench()
+ ...
+
+ suite.run();
+```
+
+or
+
+```js
+ suite.bench().bench().run();
+```
+
+
### Stats
-Rather than imposing a limited statistical model on benchmark data, uubench gives you the raw numbers. If you want to go nuts with the math have a look at [this gist](http://gist.github.com/642690).
+Rather than imposing a limited statistical model on benchmark data, mini-bench gives you the raw numbers. If you want to go nuts with the math have a look at [this gist](http://gist.github.com/642690).
### Loop calibration
In most cases auto looping doesn't add enough overhead to benchmark times to be worth worrying about, but extremely fast benchmarks can suffer. Add a calibration test if you want to correct for this overhead:
+```js
suite.bench("calibrate", function(next) {
next();
});
+```
You can then subtract the elapsed time of the "calibrate" test from other tests in the suite.
Examples
--------
-* Dust browser benchmarks:
+* Dust browser benchmarks:
* Dust node benchmarks:
+
+## Change log
+
+v1.0 (Breaking change)
+- `min` option changed to `minTime`
+- `sync` option changed to `async` (default: true)
+- added UMD module loader
+- renamed and published to npm as [mini-bench](https://npmjs.org/package/mini-bench)
+
+v0.0.2 (start of moos fork)
+- added `sync` option to run tests in sync mode (default: false)
+- added `section` method to group similar tests, fires "section" event.
+- added chaining
+
+
About
-----
-uubench was inspired by the venerable [jslitmus](http://github.com/broofa/jslitmus)
\ No newline at end of file
+mini-bench is a fork of [uubench](https://github.com/akdubya/uubench).
+
+uubench was inspired by the venerable [jslitmus](http://github.com/broofa/jslitmus)
+
+
+License
+-------
+MIT
\ No newline at end of file
diff --git a/mini-bench.js b/mini-bench.js
new file mode 100644
index 0000000..b69e602
--- /dev/null
+++ b/mini-bench.js
@@ -0,0 +1,179 @@
+/*!
+ mini-bench
+
+ Copyright (c) 2016, Moos (fork)
+ http://github.com/moos/mini-bench
+
+ Original copyright notice:
+
+ uubench - Async Benchmarking v0.0.1
+ http://github.com/akdubya/uubench
+
+ Copyright (c) 2010, Aleksander Williams
+ Released under the MIT License.
+*/
+
+;(function (root, factory) {
+ if (typeof define === 'function' && define.amd) {
+ define(factory);
+ } else if (typeof exports === 'object') {
+ module.exports = factory();
+ } else {
+ root.MiniBench = factory();
+ }
+
+}(this, function () {
+
+ var Bench = {};
+
+ Bench.defaults = {
+ type : "adaptive", // adaptive or fixed
+ iterations: 10, // starting iterations
+ minTime : 100, // minimum run time (ms) - adaptive only
+ delay : 100, // delay between tests (ms)
+ async : true // set to false to run benches in sync mode
+ };
+
+ /**
+ * single test class
+ *
+ * @param id
+ * @param test
+ * @param options
+ * @param callback
+ * @constructor
+ */
+ function Test(id, test, options, callback) {
+ this.id = id;
+ this.options = options;
+ this.test = test;
+ this.loop = test.length > 1;
+ this.callback = callback;
+ }
+
+ Test.prototype.run = function(iter, startTime) {
+ var self = this,
+ fn = self.test,
+ type = self.options.type,
+ checkfn = type === "adaptive" ? adaptive : fixed,
+ i = type === "section" ? 1 : iter,
+ pend = i,
+ minTime = self.options.minTime,
+ start = startTime || new Date();
+
+ if (self.loop) {
+ pend = 1;
+ fn(checkfn, i);
+ } else {
+ while (i--) {
+ fn(checkfn);
+ }
+ }
+
+ function fixed() {
+ if (--pend === 0) {
+ var elapsed = new Date() - start;
+ self.callback({iterations: iter, elapsed: elapsed});
+ }
+ }
+
+ function adaptive() {
+ if (--pend === 0) {
+ var elapsed = new Date() - start;
+
+ if (elapsed < minTime) {
+ var nextIter = 10 * iter; // incremental increase in next iteration
+ if (elapsed > 0) nextIter = Math.floor(iter * minTime / elapsed * 0.9);
+
+ self.run(nextIter, start); // preserve original start time
+ } else {
+ self.callback({iterations: iter, elapsed: elapsed});
+ }
+ }
+ }
+ };
+
+
+ /**
+ * Suite class
+ *
+ * @param opts {object} - any options in Bench.defaults
+ * @constructor
+ */
+ function Suite(opts) {
+ for (var key in Bench.defaults) {
+ if (opts[key] === undefined) {
+ opts[key] = Bench.defaults[key];
+ }
+ }
+ this.options = opts;
+ this.tests = [];
+ }
+
+ Suite.prototype.bench = function(name, fn) {
+ var self = this;
+ self.tests.push(new Test(name, fn, this.options, function(stats) {
+ self.emit("result", name, stats);
+ self.check();
+ }));
+ return this;
+ };
+
+ // a section is a non-bench step performed in order
+ Suite.prototype.section = function(name, fn) {
+ var self = this;
+ self.tests.push(new Test(name, fn, {type:"section"}, function(stats) {
+ self.emit("section", name, stats);
+ self.check();
+ }));
+ return this;
+ };
+
+ Suite.prototype.run = function() {
+ if (this.pending) return;
+ var self = this,
+ len = self.tests.length;
+
+ self.emit("start", self.tests);
+ self.start = new Date().getTime();
+ self.pending = len;
+
+ if (!this.options.async) {
+ self.runOne();
+ } else {
+ while (len--) self.runOne();
+ }
+ };
+
+ Suite.prototype.runOne = function() {
+ var self = this;
+ setTimeout(function() {
+ (self.tests.shift()).run(self.options.iterations);
+ }, self.options.delay);
+ };
+
+ Suite.prototype.check = function() {
+ if (--this.pending) {
+ !this.options.async && this.runOne();
+ return;
+ }
+ this.emit("done", new Date().getTime() - this.start);
+ };
+
+ Suite.prototype.emit = function(type) {
+ var event = this.options[type];
+ if (event) {
+ event.apply(this, Array.prototype.slice.call(arguments, 1));
+ }
+ };
+
+
+ // exports
+ Bench.Test = Test;
+ Bench.Suite = Suite;
+
+ return Bench;
+
+}));
+
+
diff --git a/package.json b/package.json
index 282d616..8cb942b 100644
--- a/package.json
+++ b/package.json
@@ -1,11 +1,13 @@
{
- "name" : "uubench",
- "version" : "0.0.1",
+ "name" : "mini-bench",
+ "version" : "1.0.0",
"description" : "A tiny asynchronous JavaScript benchmarking library",
"author" : "Aleksander Williams",
- "contributors" : [],
- "url" : "http://github.com/akdubya/uubench",
+ "contributors" : ["Moos "],
+ "repository" : "https://github.com/moos/mini-bench",
"keywords" : ["benchmarking"],
- "main" : "uubench",
- "scripts" : { "test": "make test" }
+ "main" : "mini-bench",
+ "scripts" : {
+ "test": "node test/core.js"
+ }
}
\ No newline at end of file
diff --git a/test/core.js b/test/core.js
index 3f72167..1ceaa37 100644
--- a/test/core.js
+++ b/test/core.js
@@ -1,5 +1,5 @@
var uutest = require('./uutest'),
- uubench = require('../uubench');
+ Bench = require('../mini-bench');
function dumpError(err) {
var out = err.testName + " -> ";
@@ -10,24 +10,22 @@ function dumpError(err) {
return out + err.stack;
}
-uubench.defaults = {
- type: "adaptive",
+var suite = new uutest.Suite({
+ type : "adaptive",
iterations: 1,
- min: 1,
- delay: 0
-}
+ minTime : 1,
+ delay : 0,
+ timeout : 1500,
-var suite = new uutest.Suite({
- pass: function() {
- process.stdout.write(".");
+ pass: function (type) {
+ console.log('%s: passed', type);
},
- fail: function(err) {
- process.stdout.write("F");
+ fail: function (err) {
+ console.log('%s: failed', err.testName);
},
- done: function(passed, failed, elapsed) {
- process.stdout.write("\n");
+ done: function (passed, failed, elapsed) {
console.log(passed + " passed " + failed + " failed " + "(" + elapsed + "ms)");
- this.errors.forEach(function(err) {
+ this.errors.forEach(function (err) {
console.log(dumpError(err));
});
}
@@ -36,7 +34,7 @@ var suite = new uutest.Suite({
suite.test("basic", function() {
var unit = this;
- var suite = new uubench.Suite({
+ var suite = new Bench.Suite({
done: function() {
unit.pass();
}
@@ -45,7 +43,7 @@ suite.test("basic", function() {
suite.bench("async", function(next) {
setTimeout(function() {
next();
- }, 2);
+ }, 20);
});
suite.bench("sync", function(next) {
@@ -55,15 +53,19 @@ suite.test("basic", function() {
suite.run();
});
+
suite.test("fixed", function() {
- var unit = this, iter;
+ var unit = this,
+ iter;
- var suite = new uubench.Suite({
+ var suite = new Bench.Suite({
type: "fixed",
iterations: 100,
+
result: function(name, stats) {
iter = stats.iterations;
},
+
done: function() {
try {
unit.equals(iter, 100);
@@ -82,4 +84,48 @@ suite.test("fixed", function() {
suite.run();
});
+
+suite.test("async option", function() {
+ var unit = this,
+ names = '',
+ section;
+
+ var suite = new Bench.Suite({
+ type: "fixed",
+ iterations: 10,
+ async: false, // run in sync!
+
+ result: function(name, stats) {
+ names += name;
+ iter = stats.iterations;
+ },
+
+ done: function() {
+ try {
+ unit.equals(section, 'section 1');
+ unit.equals(names, 'step 1step 2');
+ } catch(e) {
+ unit.fail(e);
+ return;
+ }
+ unit.pass();
+ },
+
+ section: function(name) {
+ section = name;
+ }
+ });
+
+ suite.section('section 1', function(next){
+ next();
+ })
+ .bench("step 1", function(next) {
+ setTimeout(next, 50);
+ })
+ .bench("step 2", function(next) {
+ next();
+ })
+ .run();
+});
+
suite.run();
\ No newline at end of file
diff --git a/uubench.js b/uubench.js
deleted file mode 100644
index 7337165..0000000
--- a/uubench.js
+++ /dev/null
@@ -1,115 +0,0 @@
-//
-// uubench - Async Benchmarking v0.0.1
-// http://github.com/akdubya/uubench
-//
-// Copyright (c) 2010, Aleksander Williams
-// Released under the MIT License.
-//
-
-(function(uubench){
-
-function Bench(id, test, options, callback) {
- this.id = id;
- this.options = options;
- this.test = test;
- this.loop = test.length > 1;
- this.callback = callback;
-}
-
-Bench.prototype.run = function(iter) {
- var self = this, fn = self.test,
- checkfn = self.options.type === "adaptive" ? adaptive : fixed,
- i = iter, pend = i,
- min = self.options.min, start;
-
- if (self.loop) {
- pend = 1;
- start = new Date();
- fn(checkfn, i);
- } else {
- start = new Date();
- while (i--) {
- fn(checkfn);
- }
- }
-
- function fixed() {
- if (--pend === 0) {
- var elapsed = new Date() - start;
- self.callback({iterations: iter, elapsed: elapsed});
- }
- }
-
- function adaptive() {
- if (--pend === 0) {
- var elapsed = new Date() - start;
- if (elapsed < min) {
- self.run(iter*2);
- } else {
- self.callback({iterations: iter, elapsed: elapsed});
- }
- }
- }
-}
-
-uubench.Bench = Bench;
-
-uubench.defaults = {
- type: "adaptive", // adaptive or fixed
- iterations: 10, // starting iterations
- min: 100, // minimum run time (ms) - adaptive only
- delay: 100 // delay between tests (ms)
-}
-
-function Suite(opts) {
- for (var key in uubench.defaults) {
- if (opts[key] === undefined) {
- opts[key] = uubench.defaults[key];
- }
- }
- this.options = opts;
- this.tests = [];
-}
-
-Suite.prototype.bench = function(name, fn) {
- var self = this;
- self.tests.push(new Bench(name, fn, this.options, function(stats) {
- self.emit("result", name, stats);
- self.pending--;
- self.check();
- }));
-}
-
-Suite.prototype.run = function() {
- if (this.pending) return;
- var self = this, len = self.tests.length;
- self.emit("start", self.tests);
- self.start = new Date().getTime();
- self.pending = len;
- for (var i=0; i