diff --git a/mx.fastr/mx_fastr.py b/mx.fastr/mx_fastr.py
index dd2c0ad0ac18d8c5da04909be50cffffb7c9d146..b352a67e58a43776de01cd87414d7c66c24a6b76 100644
--- a/mx.fastr/mx_fastr.py
+++ b/mx.fastr/mx_fastr.py
@@ -20,6 +20,7 @@
 # or visit www.oracle.com if you need additional information or have any
 # questions.
 #
+import subprocess, tempfile, shutil, filecmp, os
 from os.path import join, sep
 from argparse import ArgumentParser
 import shlex
@@ -65,11 +66,12 @@ def _extract_vmArgs(args):
     return (vmArgs, rArgs)
 
 def _truffle_r_gate_body(args, tasks):
+    _check_autogen_tests(False)
     t = mx_graal.Task('BuildHotSpotGraalServer: product')
     mx_graal.buildvms(['--vms', 'server', '--builds', 'product'])
     tasks.append(t.stop())
 
-    with mx_graal.VM('original', 'product'):
+    with mx_graal.VM('server', 'product'):
         # check that the expected test output file is up to date
         t = mx_graal.Task('UnitTests: ExpectedTestOutput file check')
         junit(['--tests', _default_unit_tests(), '--check-expected-output'])
@@ -91,12 +93,35 @@ def gate(args):
     t.stop()
     if rc != 0:
         mx.abort('copyright errors')
+    _check_autogen_tests(True)
     mx_graal.gate(args, _truffle_r_gate_body)
 
+_tempdir = None
+
+def _check_autogen_tests(copy):
+    # make copies of AllTests and FailingTests, as these will be regenerated by the gate
+    # and may be out of sync
+    test_srcdir = _test_srcdir()
+    all_tests = join(test_srcdir, 'all', 'AllTests.java')
+    failing_tests = join(test_srcdir, 'failing', 'FailingTests.java')
+    global _tempdir
+    if copy:
+        _tempdir = tempfile.mkdtemp()
+        shutil.copy(all_tests, _tempdir)
+        shutil.copy(failing_tests, _tempdir)
+    else:
+        files_equal = filecmp.cmp(all_tests, join(_tempdir, 'AllTests.java')) and filecmp.cmp(failing_tests, join(_tempdir, 'FailingTests.java'))
+        shutil.rmtree(_tempdir)
+        if not files_equal:
+            mx.abort('AllTests.java and/or FailingTests.java are out of sync, regenerate with mx rignoretests')
+
+def _test_srcdir():
+    tp = 'com.oracle.truffle.r.test'
+    return join(mx.project(tp).dir, 'src', tp.replace('.', sep))
+
 def _junit_r_harness(args, vmArgs, junitArgs):
     # always pass the directory where the expected output file should reside
-    tp = 'com.oracle.truffle.r.test'
-    runlistener_arg = 'expected=' + join(mx.project(tp).dir, 'src', tp.replace('.', sep))
+    runlistener_arg = 'expected=' + _test_srcdir()
     # there should not be any unparsed arguments at this stage
     if args.remainder:
         mx.abort('unexpected arguments: ' + str(args.remainder).strip('[]') + '; did you forget --tests')
@@ -126,7 +151,7 @@ def _junit_r_harness(args, vmArgs, junitArgs):
 
     if args.gen_diff_output:
         runlistener_arg = add_arg_separator()
-        runlistener_arg = 'gen-diff=' + args.gen_diff_output
+        runlistener_arg += 'gen-diff=' + args.gen_diff_output
 
 #    if args.test_methods:
 #        runlistener_arg = add_arg_separator()
@@ -168,7 +193,63 @@ def ignoredtests(args):
     mx.clean(testOnly)
     mx.build(testOnly)
 
+_fastr_suite = None
+
+def rbench(args):
+    '''run an R benchmark'''
+    parser = ArgumentParser(prog='mx rbench')
+    parser.add_argument('bm', action='store', metavar='benchmarkgroup.name', help='qualified name of benchmark')
+    parser.add_argument('--path', action='store_true', help='print path to benchmark')
+    parser.add_argument('--J', dest='vm_args', help='Graal VM arguments (e.g. --J @-dsa)', metavar='@<args>')
+    parser.add_argument('--gnur', action='store_true', help='run under GnuR')
+    parser.add_argument('--gnur-jit', action='store_true', help='enable GnuR JIT')
+    args = parser.parse_args(args)
+
+    # dynamically load the benchmarks suite
+    hg_base = mx.get_env('HG_BASE')
+    alternate = None if hg_base is None else join(hg_base, 'r_benchmarks')
+    bm_suite = _fastr_suite.import_suite('r_benchmarks', version=None, alternate=alternate)
+    mx.build_suite(bm_suite)
+
+    # Get the R script location via helper app
+    # N.B. we do not use mx.java() as that might check options we don't want for the helper, e.g. debugging agent
+    javacmd = ['java', '-cp', mx.classpath('r.benchmarks'), 'r.benchmarks.RBenchmarks', args.bm]
+    try:
+        bmpath = subprocess.check_output(javacmd).rstrip()
+        if args.path:
+            print bmpath
+        else:
+            command = []
+            if args.vm_args is not None:
+                command = ['--J', args.vm_args]
+            command = command + ['-f', bmpath]
+            if args.gnur:
+                env = os.environ.copy()
+                if args.gnur_jit:
+                    env['R_ENABLE_JIT'] = '3'
+                rc = subprocess.call(['R', '--slave'] + command, env=env)
+                if rc != 0:
+                    mx.abort('GnuR failed with rc: ' + str(rc))
+            else:
+                runRCommand(command)
+    except subprocess.CalledProcessError:
+        mx.abort(1)
+
+def _bench_harness_body(args, vmArgs):
+    mx_graal.buildvms(['--vms', 'server', '--builds', 'product'])
+    marks = ['shootout.binarytrees', 'shootout.fannkuchredux', 'shootout.fasta', 'shootout.fastaredux',
+             'shootout.knucleotide', 'shootout.mandelbrot-ascii', 'shootout.nbody', 'shootout.pidigits',
+             'shootout.regexdna', 'shootout.reversecomplement', 'shootout.spectralnorm']
+    with mx_graal.VM('server', 'product'):
+        for mark in marks:
+            rbench([mark])
+
+def bench(args):
+    mx.bench(args, harness=_bench_harness_body)
+
 def mx_init(suite):
+    global _fastr_suite
+    _fastr_suite = suite
     commands = {
         'gate' : [gate, ''],
         'r' : [runRCommand, '[options]'],
@@ -176,6 +257,8 @@ def mx_init(suite):
         'rtestgen' : [testgen, ''],
         'rignoredtests' : [ignoredtests, ''],
         'junit' : [junit, ['options']],
+        'rbench' : [rbench, 'options'],
+        'bench' : [bench, 'options'],
     }
     mx.update_commands(suite, commands)