
Comparing changes

base repository: jruby/jruby
base: 08a014e27ee3
head repository: jruby/jruby
compare: 25fc98368f1c
  • 3 commits
  • 2 files changed
  • 1 contributor

Commits on Jun 6, 2016

  1. a99e15b
  2. 6de2b9b
  3. 25fc983
Showing with 35 additions and 11 deletions.
  1. +2 −0 ci.hocon
  2. +33 −11 mx.jruby/mx_jruby.py
2 changes: 2 additions & 0 deletions ci.hocon
@@ -115,6 +115,8 @@ metrics: {
     [mx, benchmark, allocation]
   ] ${post-process-and-upload-results} [
     [mx, benchmark, minheap]
+  ] ${post-process-and-upload-results} [
+    [mx, benchmark, time]
   ] ${post-process-and-upload-results}
 }
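
For orientation: this config change adds a third `mx benchmark` invocation to the CI metrics job, with the shared `${post-process-and-upload-results}` steps spliced in after each suite run. A rough Python sketch of the command sequence the job ends up with (the substitution's contents are defined elsewhere in ci.hocon and are not part of this diff, so the upload step below is a placeholder):

    # Sketch only: POST_PROCESS stands in for ${post-process-and-upload-results},
    # whose definition is not shown in this diff.
    POST_PROCESS = [['post-process-placeholder']]

    run = []
    for suite in ['allocation', 'minheap', 'time']:  # 'time' is the new entry
        run.append(['mx', 'benchmark', suite])
        run.extend(POST_PROCESS)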

44 changes: 33 additions & 11 deletions mx.jruby/mx_jruby.py
@@ -140,7 +140,7 @@ def fixUpResult(result):
             })
             return result
 
-        return [fixUpResult(self.runBenchmark(b, bmSuiteArgs)) for b in benchmarks or self.benchmarks()]
+        return [fixUpResult(r) for b in benchmarks or self.benchmarks() for r in self.runBenchmark(b, bmSuiteArgs)]
 
     def runBenchmark(self, benchmark, bmSuiteArgs):
         raise NotImplementedError()
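
This comprehension change establishes the contract the remaining hunks implement: runBenchmark now returns a list of result dicts rather than a single dict, and run() flattens those lists with a nested comprehension. A minimal standalone sketch of the new shape (the names and values are stand-ins, not JRuby's actual classes):

    # A benchmark may now yield several results, e.g. one per timed region.
    def run_benchmark(benchmark):
        return [{'benchmark': benchmark, 'extra.metric.region': r}
                for r in ('total', 'startup')]

    # Old shape: [fix_up(run_benchmark(b)) for b in benchmarks] assumed one
    # dict per benchmark. New shape flattens each returned list:
    results = [r for b in ['hello'] for r in run_benchmark(b)]
    # -> one flat list with two results for the 'hello' benchmark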
@@ -165,17 +165,14 @@ def runBenchmark(self, benchmark, bmSuiteArgs):
 
         data = json.loads(out.data)
 
-        result = {
+        return [{
             'benchmark': benchmark,
-            'metric.name': 'memory',
+            'metric.name': 'time',
             'metric.value': data['median'],
-            'metric.unit': 'B',
+            'metric.unit': 's',
             'metric.better': 'lower',
             'extra.metric.error-num': data['error'],
             'extra.metric.human': data['human']
-        }
-
-        return result
+        }]
 
 class MinHeapBenchmarkSuite(RubyBenchmarkSuite):
     def name(self):
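
The hunk above applies the same contract to a single-measurement suite: `result = {...}` plus `return result` becomes `return [{...}]`, a one-element list, so run() needs no special case for suites that report only one value. A sketch with made-up numbers, mirroring the result schema this file uses:

    # Made-up values; the dict keys mirror the suite's result schema.
    def run_benchmark(benchmark):
        data = {'median': 1.5, 'error': 0.1, 'human': '1.5s +/- 0.1'}
        return [{
            'benchmark': benchmark,
            'metric.name': 'time',
            'metric.value': data['median'],
            'metric.unit': 's',
            'metric.better': 'lower',
            'extra.metric.error-num': data['error'],
            'extra.metric.human': data['human']
        }]

    results = run_benchmark('hello')  # a one-element list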
@@ -193,16 +190,41 @@ def runBenchmark(self, benchmark, bmSuiteArgs):
 
         data = json.loads(out.data)
 
-        result = {
+        return [{
             'benchmark': benchmark,
             'metric.name': 'memory',
             'metric.value': data['min'],
             'metric.unit': 'MB',
             'metric.better': 'lower',
             'extra.metric.human': data['human']
-        }
-
-        return result
+        }]
+
+class TimeBenchmarkSuite(RubyBenchmarkSuite):
+    def name(self):
+        return 'time'
+
+    def benchmarks(self):
+        return metrics_benchmarks.keys()
+
+    def runBenchmark(self, benchmark, bmSuiteArgs):
+        out = mx.OutputCapture()
+
+        options = []
+
+        jt(['metrics', 'time', '--json'] + metrics_benchmarks[benchmark] + bmSuiteArgs, out=out)
+
+        data = json.loads(out.data)
+
+        return [{
+            'benchmark': benchmark,
+            'extra.metric.region': r,
+            'metric.name': 'time',
+            'metric.value': t,
+            'metric.unit': 's',
+            'metric.better': 'lower',
+            'extra.metric.human': data['human']
+        } for r, t in data.items() if r != 'human']
 
 mx_benchmark.add_bm_suite(AllocationBenchmarkSuite())
 mx_benchmark.add_bm_suite(MinHeapBenchmarkSuite())
+mx_benchmark.add_bm_suite(TimeBenchmarkSuite())
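
The new TimeBenchmarkSuite fans one JSON payload from `jt metrics time --json` out into several results, one per entry in the payload; the `human` key is a readable summary rather than a timed region, so the comprehension filters it out. A sketch with a made-up payload (the region names are illustrative, not taken from this diff):

    # Made-up payload in the shape the comprehension above consumes.
    data = {'total': 4.2, 'startup': 1.1, 'human': 'total 4.2s, startup 1.1s'}

    results = [{
        'benchmark': 'hello',  # hypothetical benchmark name
        'extra.metric.region': r,
        'metric.name': 'time',
        'metric.value': t,
        'metric.unit': 's',
        'metric.better': 'lower',
        'extra.metric.human': data['human']
    } for r, t in data.items() if r != 'human']

    # Two results: one for 'total' and one for 'startup'.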