
Comparing changes

base repository: m-labs/artiq
base: cbdc1ba46f4a
head repository: m-labs/artiq
compare: c98e24abd477
  • 9 commits
  • 6 files changed
  • 1 contributor

Commits on Apr 4, 2015

  1. pdq2: cleanup unittest

    jordens committed Apr 4, 2015
    16ff190
  2. pdq2: spelling fix

    jordens committed Apr 4, 2015
    d165358

Commits on Apr 5, 2015

  1. e50661d
  2. pipistrello: use mem_decoder

    jordens committed Apr 5, 2015
    0ae4492
  3. afc3982
  4. 7b86138
  5. 43893c6
  6. artiq_run: refactor, support use from within experiments

    You can always (under POSIX) use #!/usr/bin/env artiq_run as the
    shebang for experiments and make them executable.
    Now you can also do this (portable):
    
    if __name__ == "__main__":
        from artiq.frontend.artiq_run import run
        run()
    
    to make an experiment executable. The CLI options are all inherited.
    Also:
    
    * removed --elf: can be inferred from filename
    * did some refactoring and cleanup
    * use logging for all messages, except the result printing (use -v to get
      parameter changes and dummy scheduler actions)
    jordens committed Apr 5, 2015
    1a1afd5
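
    As an illustration of that entry point (hypothetical file and class names;
    the Experiment/AutoDB classes, Argument, and the CLI options are taken from
    the diffs below), a minimal executable experiment could look like:

    #!/usr/bin/env artiq_run
    # flop.py -- hypothetical example, not part of this changeset

    from artiq.language.db import *
    from artiq.language.experiment import Experiment


    class Flop(Experiment, AutoDB):
        """Toy experiment, only to show the new entry point."""
        class DBKeys:
            repeats = Argument(10)

        def run(self):
            for i in range(self.repeats):
                print("iteration", i)


    if __name__ == "__main__":
        from artiq.frontend.artiq_run import run
        run()

    After chmod +x it can be started as ./flop.py repeats=5 (arguments are
    name=value pairs decoded with pyon, as in artiq_run) or as
    python3 flop.py, and the -e, -o and -v options work as for artiq_run.
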
  7. photon_histogram: add features

    * support executing it (python3 repository/photon_histogram.py)
    * show off parameters and results
    jordens committed Apr 5, 2015
    c98e24a
Showing with 138 additions and 119 deletions.
  1. +82 −88 artiq/frontend/artiq_run.py
  2. +8 −1 artiq/master/worker_db.py
  3. +1 −4 artiq/master/worker_impl.py
  4. +3 −5 artiq/test/pdq2.py
  5. +22 −4 examples/master/repository/photon_histogram.py
  6. +22 −17 soc/targets/artiq_pipistrello.py
170 changes: 82 additions & 88 deletions artiq/frontend/artiq_run.py
@@ -5,32 +5,36 @@
 import time
 from operator import itemgetter
 from itertools import chain
+import logging
 
 import h5py
 
 from artiq.language.db import *
-from artiq.language.experiment import is_experiment
+from artiq.language.experiment import is_experiment, Experiment
 from artiq.protocols import pyon
 from artiq.protocols.file_db import FlatFileDB
 from artiq.master.worker_db import DBHub, ResultDB
 from artiq.tools import file_import, verbosity_args, init_logger
 
 
-class ELFRunner(AutoDB):
+logger = logging.getLogger(__name__)
+
+
+class ELFRunner(Experiment, AutoDB):
     class DBKeys:
         comm = Device()
+        file = Argument()
 
-    def run(self, filename):
-        with open(filename, "rb") as f:
-            binary = f.read()
-        comm.load(binary)
-        comm.run("run")
-        comm.serve(dict(), dict())
+    def run(self):
+        with open(self.file, "rb") as f:
+            self.comm.load(f.read())
+        self.comm.run("run")
+        self.comm.serve(dict(), dict())
 
 
 class SimpleParamLogger:
     def set(self, timestamp, name, value):
-        print("Parameter change: {} -> {}".format(name, value))
+        logger.info("Parameter change: {} = {}".format(name, value))
 
 
 class DummyWatchdog:
@@ -52,26 +56,26 @@ def __init__(self):
     def run_queued(self, run_params):
         rid = self.next_rid
         self.next_rid += 1
-        print("Queuing: {}, RID={}".format(run_params, rid))
+        logger.info("Queuing: %s, RID=%s", run_params, rid)
         return rid
 
     def cancel_queued(self, rid):
-        print("Cancelling RID {}".format(rid))
+        logger.info("Cancelling RID %s", rid)
 
     def run_timed(self, run_params, next_run):
         trid = self.next_trid
         self.next_trid += 1
         next_run_s = time.strftime("%m/%d %H:%M:%S", time.localtime(next_run))
-        print("Timing: {} at {}, TRID={}".format(run_params, next_run_s, trid))
+        logger.info("Timing: %s at %s, TRID=%s", run_params, next_run_s, trid)
         return trid
 
     def cancel_timed(self, trid):
-        print("Cancelling TRID {}".format(trid))
+        logger.info("Cancelling TRID %s", trid)
 
     watchdog = DummyWatchdog
 
 
-def get_argparser():
+def get_argparser(with_file):
     parser = argparse.ArgumentParser(
         description="Local experiment running tool")
 
@@ -81,15 +85,14 @@ def get_argparser():
     parser.add_argument("-p", "--pdb", default="pdb.pyon",
                         help="parameter database file")
 
-    parser.add_argument("-E", "--elf", default=False, action="store_true",
-                        help="run ELF binary")
     parser.add_argument("-e", "--experiment", default=None,
                         help="experiment to run")
     parser.add_argument("-o", "--hdf5", default=None,
                         help="write results to specified HDF5 file"
                              " (default: print them)")
-    parser.add_argument("file",
-                        help="file containing the experiment to run")
+    if with_file:
+        parser.add_argument("file",
+                            help="file containing the experiment to run")
     parser.add_argument("arguments", nargs="*",
                         help="run arguments")
 
@@ -99,86 +102,77 @@ def get_argparser():
 def _parse_arguments(arguments):
     d = {}
     for argument in arguments:
-        name, value = argument.split("=")
+        name, eq, value = argument.partition("=")
         d[name] = pyon.decode(value)
     return d
 
 
-def main():
-    args = get_argparser().parse_args()
+def _get_experiment(module, experiment=None):
+    if experiment:
+        return getattr(module, experiment)
+
+    exps = [(k, v) for k, v in module.__dict__.items()
+            if is_experiment(v)]
+    if not exps:
+        logger.error("No experiments in module")
+    if len(exps) > 1:
+        logger.warning("Multiple experiments (%s), using first",
+                       ", ".join(k for (k, v) in exps))
+    return exps[0][1]
+
+
+def _build_experiment(dbh, args):
+    if hasattr(args, "file"):
+        if args.file.endswith(".elf"):
+            if args.arguments:
+                raise ValueError("arguments not supported for ELF kernels")
+            if args.experiment:
+                raise ValueError("experiment-by-name not supported "
+                                 "for ELF kernels")
+            return ELFRunner(dbh, file=args.file)
+        else:
+            module = file_import(args.file)
+        file = args.file
+    else:
+        module = sys.modules["__main__"]
+        file = getattr(module, "__file__")
+    exp = _get_experiment(module, args.experiment)
+    arguments = _parse_arguments(args.arguments)
+    return exp(dbh,
+               scheduler=DummyScheduler(),
+               run_params=dict(file=file,
+                               experiment=args.experiment,
+                               arguments=arguments),
+               **arguments)
+
+
+def run(with_file=False):
+    args = get_argparser(with_file).parse_args()
     init_logger(args)
 
     ddb = FlatFileDB(args.ddb)
     pdb = FlatFileDB(args.pdb)
     pdb.hooks.append(SimpleParamLogger())
     rdb = ResultDB(lambda description: None, lambda mod: None)
-    dbh = DBHub(ddb, pdb, rdb)
-    try:
-        if args.elf:
-            if args.arguments:
-                print("Run arguments are not supported in ELF mode")
-                sys.exit(1)
-            exp_inst = ELFRunner(dbh)
-            rdb.build()
-            exp_inst.run(args.file)
-        else:
-            module = file_import(args.file)
-            if args.experiment is None:
-                exps = [(k, v) for k, v in module.__dict__.items()
-                        if is_experiment(v)]
-                l = len(exps)
-                if l == 0:
-                    print("No experiments found in module")
-                    sys.exit(1)
-                elif l > 1:
-                    print("More than one experiment found in module:")
-                    for k, v in sorted(experiments, key=itemgetter(0)):
-                        if v.__doc__ is None:
-                            print(" {}".format(k))
-                        else:
-                            print(" {} ({})".format(
-                                k, v.__doc__.splitlines()[0].strip()))
-                    print("Use -u to specify which experiment to use.")
-                    sys.exit(1)
-                else:
-                    exp = exps[0][1]
-            else:
-                exp = getattr(module, args.experiment)
-
-            try:
-                arguments = _parse_arguments(args.arguments)
-            except:
-                print("Failed to parse run arguments")
-                sys.exit(1)
-
-            run_params = {
-                "file": args.file,
-                "experiment": args.experiment,
-                "arguments": arguments
-            }
-            exp_inst = exp(dbh,
-                           scheduler=DummyScheduler(),
-                           run_params=run_params,
-                           **run_params["arguments"])
-            rdb.build()
-            exp_inst.run()
-            exp_inst.analyze()
-
-        if args.hdf5 is not None:
-            f = h5py.File(args.hdf5, "w")
-            try:
-                rdb.write_hdf5(f)
-            finally:
-                f.close()
-        else:
-            if rdb.data.read or rdb.realtime_data.read:
-                print("Results:")
-                for k, v in sorted(chain(rdb.realtime_data.read.items(),
-                                         rdb.data.read.items()),
-                                   key=itemgetter(0)):
-                    print("{}: {}".format(k, v))
-    finally:
-        dbh.close_devices()
 
+    with DBHub(ddb, pdb, rdb) as dbh:
+        exp_inst = _build_experiment(dbh, args)
+        rdb.build()
+        exp_inst.run()
+        exp_inst.analyze()
+
+    if args.hdf5 is not None:
+        with h5py.File(args.hdf5, "w") as f:
+            rdb.write_hdf5(f)
+    elif rdb.data.read or rdb.realtime_data.read:
+        r = chain(rdb.realtime_data.read.items(), rdb.data.read.items())
+        for k, v in sorted(r, key=itemgetter(0)):
+            print("{}: {}".format(k, v))
+
+
+def main():
+    return run(with_file=True)
+
+
 if __name__ == "__main__":
     main()
9 changes: 8 additions & 1 deletion artiq/master/worker_db.py
@@ -112,4 +112,11 @@ def close_devices(self):
                 dev.close_rpc()
             elif hasattr(dev, "close"):
                 dev.close()
-        self.active_devices = OrderedDict()
+        self.active_devices.clear()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.close_devices()
+        return False  # do not suppress exceptions within context
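
The __enter__/__exit__ pair added above makes DBHub usable as a context
manager, so callers can drop the explicit try/finally around close_devices()
(as the artiq_run.py hunk above and the worker_impl.py hunk below do). A
minimal sketch of the resulting pattern, with assumed database file names and
an illustrative ELF kernel path:

    from artiq.frontend.artiq_run import ELFRunner
    from artiq.master.worker_db import DBHub, ResultDB
    from artiq.protocols.file_db import FlatFileDB

    ddb = FlatFileDB("ddb.pyon")   # device db file name assumed
    pdb = FlatFileDB("pdb.pyon")
    rdb = ResultDB(lambda description: None, lambda mod: None)

    with DBHub(ddb, pdb, rdb) as dbh:   # __enter__ returns the hub itself
        exp_inst = ELFRunner(dbh, file="kernel.elf")   # illustrative path
        rdb.build()
        exp_inst.run()
    # on leaving the block, __exit__ has called dbh.close_devices(),
    # even if run() raised
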
5 changes: 1 addition & 4 deletions artiq/master/worker_impl.py
@@ -97,9 +97,8 @@ def main():
     exp_inst = None
 
     rdb = ResultDB(init_rt_results, update_rt_results)
-    dbh = DBHub(ParentDDB, ParentPDB, rdb)
 
-    try:
+    with DBHub(ParentDDB, ParentPDB, rdb) as dbh:
         while True:
             obj = get_object()
             action = obj["action"]
@@ -130,8 +129,6 @@ def main():
                 put_object({"action": "completed"})
             elif action == "terminate":
                 break
-    finally:
-        dbh.close_devices()
 
 if __name__ == "__main__":
     main()
8 changes: 3 additions & 5 deletions artiq/test/pdq2.py
@@ -5,9 +5,6 @@
 from artiq.devices.pdq2.driver import Pdq2
 
 
-no_hardware = bool(os.getenv("ARTIQ_NO_HARDWARE")) \
-              or bool(os.getenv("ARTIQ_NO_PERIPHERALS"))
-
 pdq2_source = os.getenv("ARTIQ_PDQ2_SOURCE")
 
 
@@ -21,6 +18,7 @@ def test_reset(self):
         self.assertEqual(buf, b"\xa5\x00")
 
     def test_program(self):
+        # about 0.14 ms
         self.dev.program(_test_program)
 
     @unittest.skipUnless(pdq2_source, "no pdq2 source and gateware")
@@ -40,9 +38,9 @@ def test_gateware(self):
         import numpy as np
         tb = Pdq2Sim(buf)
         tb.ctrl_pads.trigger.reset = 0
-        run_simulation(tb, vcd_name="pdq2.vcd", ncycles=700)
+        run_simulation(tb, vcd_name="pdq2.vcd", ncycles=len(buf) + 250)
         out = np.array(tb.outputs, np.uint16).view(np.int16)
-        for outi in out[len(buf):].T:
+        for outi in out[len(buf) + 100:].T:
             plt.step(np.arange(len(outi)), outi)
         plt.show()
 
26 changes: 22 additions & 4 deletions examples/master/repository/photon_histogram.py
file mode changed: 100644 → 100755
@@ -13,27 +13,45 @@ class DBKeys:
         nbins = Argument(100)
         repeats = Argument(100)
 
+        cool_f = Parameter(230)
+        detect_f = Parameter(220)
+        detect_t = Parameter(100)
+
+        ion_present = Parameter(True)
+
+        hist = Result()
+        total = Result()
+
     @kernel
     def cool_detect(self):
         with parallel:
             self.bd.pulse(200*MHz, 1*ms)
             self.bdd.pulse(300*MHz, 1*ms)
-        self.bd.pulse(210*MHz, 100*us)
+        self.bd.pulse(self.cool_f*MHz, 100*us)
         with parallel:
-            self.bd.pulse(220*MHz, 100*us)
-            self.pmt.gate_rising(100*us)
+            self.bd.pulse(self.detect_f*MHz, self.detect_t*us)
+            self.pmt.gate_rising(self.detect_t*us)
         self.bd.on(200*MHz)
         self.bdd.on(300*MHz)
         return self.pmt.count()
 
     @kernel
     def run(self):
         hist = [0 for _ in range(self.nbins)]
+        total = 0
 
         for i in range(self.repeats):
             n = self.cool_detect()
             if n >= self.nbins:
                 n = self.nbins - 1
             hist[n] += 1
+            total += n
+
+        self.hist = hist
+        self.total = total
+        self.ion_present = total > 5*self.repeats
+
 
-        print(hist)
+if __name__ == "__main__":
+    from artiq.frontend.artiq_run import run
+    run()