
Comparing changes

Choose two branches to see what’s changed or to start a new pull request. You can also learn more about diff comparisons.

base repository: jruby/jruby
base: 9f706bba1179
head repository: jruby/jruby
compare: a33816205c69
  • 5 commits
  • 2 files changed
  • 1 contributor

Commits on Jun 13, 2016

  1. 3c64f60
  2. 7771a7f
  3. b11ca8f
  4. f8978cd
  5. a338162
Showing with 28 additions and 29 deletions.
  1. +13 −17 mx.jruby/mx_jruby.py
  2. +15 −12 tool/jt.rb
mx.jruby/mx_jruby.py: 30 changes (13 additions, 17 deletions)
@@ -168,31 +168,28 @@ def name(self):
 
     def runBenchmark(self, benchmark, bmSuiteArgs):
         out = mx.OutputCapture()
 
-        options = []
-
         jt(['metrics', 'alloc', '--json'] + metrics_benchmarks[benchmark] + bmSuiteArgs, out=out)
 
         data = json.loads(out.data)
 
         return [{
             'benchmark': benchmark,
             'metric.name': 'memory',
-            'metric.value': data['median'],
+            'metric.value': sample,
             'metric.unit': 'B',
             'metric.better': 'lower',
-            'extra.metric.human': data['human']
-        }]
+            'metric.iteration': n,
+            'extra.metric.human': '%d/%d %s' % (n, len(data['samples']), data['human'])
+        } for n, sample in enumerate(data['samples'])]
 
 class MinHeapBenchmarkSuite(MetricsBenchmarkSuite):
     def name(self):
         return 'minheap'
 
     def runBenchmark(self, benchmark, bmSuiteArgs):
         out = mx.OutputCapture()
 
-        options = []
-
         jt(['metrics', 'minheap', '--json'] + metrics_benchmarks[benchmark] + bmSuiteArgs, out=out)
 
         data = json.loads(out.data)
@@ -212,22 +209,21 @@ def name(self):
 
     def runBenchmark(self, benchmark, bmSuiteArgs):
         out = mx.OutputCapture()
 
-        options = []
-
         jt(['metrics', 'time', '--json'] + metrics_benchmarks[benchmark] + bmSuiteArgs, out=out)
 
         data = json.loads(out.data)
 
         return [{
             'benchmark': benchmark,
-            'extra.metric.region': r,
+            'extra.metric.region': region,
             'metric.name': 'time',
-            'metric.value': t,
+            'metric.value': sample,
             'metric.unit': 's',
             'metric.better': 'lower',
-            'extra.metric.human': data['human']
-        } for r, t in data.items() if r != 'human']
+            'metric.iteration': n,
+            'extra.metric.human': '%d/%d %s' % (n, len(region_data['samples']), region_data['human'])
+        } for region, region_data in data.items() for n, sample in enumerate(region_data['samples'])]
 
 mx_benchmark.add_bm_suite(AllocationBenchmarkSuite())
 mx_benchmark.add_bm_suite(MinHeapBenchmarkSuite())
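
Taken together, the mx_jruby.py changes replace one aggregated datapoint per benchmark with one datapoint per raw sample. Below is a minimal standalone sketch of that fan-out, assuming a payload in the shape the updated `jt metrics alloc --json` emits; the benchmark name and numbers are invented for illustration.

import json

# Illustrative payload only: this mirrors the shape that `jt metrics alloc --json`
# produces after this change (a 'samples' list alongside the summary fields).
# The values are invented for the example.
captured = json.dumps({
    'samples': [104857600, 105906176, 104726528],
    'median': 104857600,
    'error': 524288,
    'human': '100MB ± 512KB'
})

data = json.loads(captured)

# Same comprehension as the updated AllocationBenchmarkSuite.runBenchmark:
# one datapoint per sample, tagged with its iteration index.
datapoints = [{
    'benchmark': 'hypothetical-benchmark',  # placeholder name, not a real suite entry
    'metric.name': 'memory',
    'metric.value': sample,
    'metric.unit': 'B',
    'metric.better': 'lower',
    'metric.iteration': n,
    'extra.metric.human': '%d/%d %s' % (n, len(data['samples']), data['human'])
} for n, sample in enumerate(data['samples'])]

for point in datapoints:
    print('%d: %d B (%s)' % (point['metric.iteration'], point['metric.value'], point['extra.metric.human']))

Reporting `metric.iteration` alongside each raw sample lets downstream consumers see the per-iteration spread rather than only the pre-computed median.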
tool/jt.rb: 27 changes (15 additions, 12 deletions)
@@ -374,7 +374,7 @@ def help
       puts ' --server run an instrumentation server on port 8080'
       puts ' --igv make sure IGV is running and dump Graal graphs after partial escape (implies --graal)'
       puts ' --full show all phases, not just up to the Truffle partial escape'
-      puts ' --jdebug run a JDWP debug server on #{JDEBUG_PORT}'
+      puts " --jdebug run a JDWP debug server on #{JDEBUG_PORT}"
       puts ' --jexception[s] print java exceptions'
       puts 'jt e 14 + 2 evaluate an expression'
       puts 'jt puts 14 + 2 evaluate and print an expression'
@@ -800,9 +800,10 @@ def metrics_alloc(*args)
       human_readable = "#{Utilities.human_size(median)} ± #{Utilities.human_size(error)}"
       if use_json
         puts JSON.generate({
-          median: median,
-          error: error,
-          human: human_readable
+          samples: samples,
+          median: median,
+          error: error,
+          human: human_readable
         })
       else
         puts human_readable
@@ -854,8 +855,8 @@ def metrics_minheap(*args)
       human_readable = "#{heap} MB"
       if use_json
         puts JSON.generate({
-          min: heap,
-          human: human_readable
+          min: heap,
+          human: human_readable
         })
       else
         puts human_readable
@@ -881,22 +882,24 @@ def metrics_time(*args)
       end
       Utilities.log "\n", nil
       results = {}
-      results['human'] = ''
       samples[0].each_key do |region|
         region_samples = samples.map { |s| s[region] }
         mean = region_samples.inject(:+) / samples.size
-        results[region] = mean
+        human = "#{region.strip} #{mean.round(2)} s"
+        results[region] = {
+          samples: region_samples,
+          mean: mean,
+          human: human
+        }
         if use_json
           file = STDERR
         else
           file = STDOUT
         end
-        human = "#{region} #{mean.round(2)} s\n"
-        file.print human
-        results['human'] += human
+        file.puts region[/\s*/] + human
       end
       if use_json
-        puts JSON.generate(Hash[results.map { |key, value| [key.strip, value] }])
+        puts JSON.generate(Hash[results.map { |key, values| [key.strip, values] }])
       end
     end
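
On the jt.rb side, metrics_time now records a samples/mean/human object per region instead of a flat region-to-mean map with a shared 'human' string, and the updated TimeBenchmarkSuite flattens that into one datapoint per (region, sample) pair. Here is a minimal sketch of that round trip; the region names and timings are invented for illustration.

import json

# Illustrative output only: after this change `jt metrics time --json` emits one
# object per region holding its raw samples. The values below are made up.
captured = json.dumps({
    'total': {'samples': [10.1, 9.8, 10.0], 'mean': 9.97, 'human': 'total 9.97 s'},
    'main':  {'samples': [8.2, 8.0, 8.1],   'mean': 8.1,  'human': 'main 8.1 s'}
})

data = json.loads(captured)

# Same nested comprehension as the updated TimeBenchmarkSuite.runBenchmark:
# every (region, sample) pair becomes its own datapoint.
datapoints = [{
    'benchmark': 'hypothetical-benchmark',  # placeholder name, not a real suite entry
    'extra.metric.region': region,
    'metric.name': 'time',
    'metric.value': sample,
    'metric.unit': 's',
    'metric.better': 'lower',
    'metric.iteration': n,
    'extra.metric.human': '%d/%d %s' % (n, len(region_data['samples']), region_data['human'])
} for region, region_data in data.items() for n, sample in enumerate(region_data['samples'])]

for point in datapoints:
    print('%s[%d] = %.2f s' % (point['extra.metric.region'], point['metric.iteration'], point['metric.value']))

Because the human-readable text now lives inside each region's object, the old top-level 'human' key is gone and the consumer no longer needs the `if r != 'human'` filter.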