diff --git a/.coveragerc b/.coveragerc index 74367c2..f03a13c 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,9 +1,9 @@ [run] branch = True include = - nmigen/* + amaranth/* omit = - nmigen/test/* + amaranth/test/* [report] exclude_lines = diff --git a/.gitattributes b/.gitattributes index 87ab883..208af66 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1 +1 @@ -/nmigen/vendor/* -linguist-vendored +/amaranth/vendor/* -linguist-vendored diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 6bd2866..0e3a94b 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -35,7 +35,7 @@ jobs: key: ${{ runner.os }}-wasm - name: Run tests run: | - export NMIGEN_USE_YOSYS=builtin YOSYS=yowasp-yosys SBY=yowasp-sby SMTBMC=yowasp-yosys-smtbmc + export AMARANTH_USE_YOSYS=builtin YOSYS=yowasp-yosys SBY=yowasp-sby SMTBMC=yowasp-yosys-smtbmc export PYTHONWARNINGS=error python -m coverage run -m unittest codecov @@ -56,7 +56,7 @@ jobs: run: | sphinx-build docs docs/_build - name: Publish documentation - if: github.event_name == 'push' && github.event.ref == 'refs/heads/master' + if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' uses: JamesIves/github-pages-deploy-action@releases/v3 with: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/CONTRIBUTING.txt b/CONTRIBUTING.txt index 24cd482..444e81c 100644 --- a/CONTRIBUTING.txt +++ b/CONTRIBUTING.txt @@ -7,8 +7,8 @@ this purpose. COPYRIGHTS and LICENSE -nMigen is licensed under the 2-clause BSD license, which is contained in the -LICENSE.txt file. +Amaranth HDL is licensed under the 2-clause BSD license, which is contained in +the LICENSE.txt file. All authors retain copyright ownership of their contributions. 
diff --git a/LICENSE.txt b/LICENSE.txt index 0b8ef14..11e10f8 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (C) 2019-2020 whitequark +Copyright (C) 2019-2021 Amaranth HDL contributors Copyright (C) 2011-2019 M-Labs Limited Redistribution and use in source and binary forms, with or without modification, diff --git a/README.md b/README.md index eadf51a..2a31865 100644 --- a/README.md +++ b/README.md @@ -1,35 +1,30 @@ -# nMigen +# Amaranth HDL -## A refreshed Python toolbox for building complex digital hardware +The Amaranth project provides an open-source toolchain for developing hardware based on synchronous digital logic using the Python programming language, as well as [evaluation board definitions][amaranth-boards], a [System on Chip toolkit][amaranth-soc], and more. It aims to be easy to learn and use, reduce or eliminate common coding mistakes, and simplify the design of complex hardware with reusable components. -**Although nMigen is incomplete and in active development, it can already be used for real-world designs. The nMigen language (`nmigen.hdl.ast`, `nmigen.hdl.dsl`) will not undergo incompatible changes. The nMigen standard library (`nmigen.lib`) and build system (`nmigen.build`) will undergo minimal changes before their design is finalized.** +The Amaranth toolchain consists of the Amaranth hardware definition language, the standard library, the simulator, and the build system, covering all steps of a typical FPGA development workflow. At the same time, it does not restrict the designer’s choice of tools: existing industry-standard (System)Verilog or VHDL code can be integrated into an Amaranth-based design flow, or, conversely, Amaranth code can be integrated into an existing Verilog-based design flow. -Despite being faster than schematics entry, hardware design with Verilog and VHDL remains tedious and inefficient for several reasons. 
The event-driven model introduces issues and manual coding that are unnecessary for synchronous circuits, which represent the lion's share of today's logic designs. Counterintuitive arithmetic rules result in steeper learning curves and provide a fertile ground for subtle bugs in designs. Finally, support for procedural generation of logic (metaprogramming) through "generate" statements is very limited and restricts the ways code can be made generic, reused and organized. +[amaranth-boards]: https://github.com/amaranth-lang/amaranth-boards +[amaranth-soc]: https://github.com/amaranth-lang/amaranth-soc -To address those issues, we have developed the *nMigen FHDL*, a library that replaces the event-driven paradigm with the notions of combinatorial and synchronous statements, has arithmetic rules that make integers always behave like mathematical integers, and most importantly allows the design's logic to be constructed by a Python program. This last point enables hardware designers to take advantage of the richness of the Python language—object oriented programming, function parameters, generators, operator overloading, libraries, etc.—to build well organized, reusable and elegant designs. +The development of Amaranth has been supported by [SymbioticEDA][], [LambdaConcept][], and [ChipEleven][]. -Other nMigen libraries are built on FHDL and provide various tools and logic cores. nMigen also contains a simulator that allows test benches to be written in Python. - -nMigen is based on Migen, a similar Python-based hardware description language. Although Migen works very well in production, its design could be improved in many fundamental ways, and nMigen reimplements Migen concepts from scratch to do so. nMigen also provides an extensive [compatibility layer](#migration-from-migen) that makes it possible to build and simulate most Migen designs unmodified, as well as integrate modules written for Migen and nMigen. 
- -The development of nMigen has been supported by [SymbioticEDA][], [LambdaConcept][], and [ChipEleven][]. - -[yosys]: http://www.clifford.at/yosys/ +[yosys]: https://yosyshq.net/yosys/ [symbioticeda]: https://www.symbioticeda.com/ [lambdaconcept]: http://lambdaconcept.com/ [chipeleven]: https://chipeleven.com/ -### Introduction +## Introduction -See the [Introduction](https://nmigen.info/nmigen/latest/intro.html) section of the documentation. +See the [Introduction](https://amaranth-lang.org/amaranth/latest/intro.html) section of the documentation. -### Installation +## Installation -See the [Installation](https://nmigen.info/nmigen/latest/install.html) section of the documentation. +See the [Installation](https://amaranth-lang.org/amaranth/latest/install.html) section of the documentation. -### Supported devices +## Supported devices -nMigen can be used to target any FPGA or ASIC process that accepts behavioral Verilog-2001 as input. It also offers extended support for many FPGA families, providing toolchain integration, abstractions for device-specific primitives, and more. Specifically: +Amaranth can be used to target any FPGA or ASIC process that accepts behavioral Verilog-2001 as input. It also offers extended support for many FPGA families, providing toolchain integration, abstractions for device-specific primitives, and more. Specifically: * Lattice iCE40 (toolchains: **Yosys+nextpnr**, LSE-iCECube2, Synplify-iCECube2); * Lattice MachXO2 (toolchains: Diamond); @@ -44,22 +39,22 @@ nMigen can be used to target any FPGA or ASIC process that accepts behavioral Ve FOSS toolchains are listed in **bold**. -### Migration from Migen +## Migration from Migen -If you are already familiar with Migen, the good news is that nMigen provides a comprehensive Migen compatibility layer! 
An existing Migen design can be synthesized and simulated with nMigen in three steps: +If you have existing Migen code, you can use a comprehensive Migen compatibility layer provided in Amaranth. An existing Migen design can be synthesized and simulated with Amaranth in three steps: - 1. Replace all `from migen import <...>` statements with `from nmigen.compat import <...>`. + 1. Replace all `from migen import <...>` statements with `from amaranth.compat import <...>`. 2. Replace every explicit mention of the default `sys` clock domain with the new default `sync` clock domain. E.g. `ClockSignal("sys")` is changed to `ClockSignal("sync")`. - 3. Migrate from Migen build/platform system to nMigen build/platform system. nMigen does not provide a build/platform compatibility layer because both the board definition files and the platform abstraction differ too much. + 3. Migrate from Migen build/platform system to Amaranth build/platform system. Amaranth does not provide a build/platform compatibility layer because both the board definition files and the platform abstraction differ too much. -Note that nMigen will **not** produce the exact same RTL as Migen did. nMigen has been built to allow you to take advantage of the new and improved functionality it has (such as producing hierarchical RTL) while making migration as painless as possible. +Note that Amaranth will **not** produce the exact same RTL as Migen did. Amaranth has been built to allow you to take advantage of the new and improved functionality it has (such as producing hierarchical RTL) while making migration as painless as possible. -Once your design passes verification with nMigen, you can migrate it to the nMigen syntax one module at a time. Migen modules can be added to nMigen modules and vice versa, so there is no restriction on the order of migration, either. +Once your design passes verification with Amaranth, you can migrate it to the Amaranth syntax one module at a time. 
Migen modules can be added to Amaranth modules and vice versa, so there is no restriction on the order of migration, either. -### Community +## Community -nMigen has a dedicated IRC channel, [#nmigen at libera.chat](irc://irc.libera.chat/nmigen). Feel free to join to ask questions about using nMigen or discuss ongoing development of nMigen and its related projects. +Amaranth has a dedicated IRC channel, [#amaranth-lang at libera.chat](irc://irc.libera.chat/amaranth-lang). Feel free to join to ask questions about using Amaranth or discuss ongoing development of Amaranth and its related projects. -### License +## License -nMigen is released under the very permissive [two-clause BSD license](LICENSE.txt). Under the terms of this license, you are authorized to use nMigen for closed-source proprietary designs. +Amaranth is released under the very permissive [two-clause BSD license](LICENSE.txt). Under the terms of this license, you are authorized to use Amaranth for closed-source proprietary designs. diff --git a/amaranth/__init__.py b/amaranth/__init__.py new file mode 100644 index 0000000..7f99c4a --- /dev/null +++ b/amaranth/__init__.py @@ -0,0 +1,26 @@ +try: + try: + from importlib import metadata as importlib_metadata # py3.8+ stdlib + except ImportError: + import importlib_metadata # py3.7- shim + __version__ = importlib_metadata.version(__package__) +except ImportError: + # No importlib_metadata. This shouldn't normally happen, but some people prefer not installing + # packages via pip at all, instead using PYTHONPATH directly or copying the package files into + # `lib/pythonX.Y/site-packages`. Although not a recommended way, we still try to support it. 
+ __version__ = "unknown" # :nocov: + + +from .hdl import * + + +__all__ = [ + "Shape", "unsigned", "signed", + "Value", "Const", "C", "Mux", "Cat", "Repl", "Array", "Signal", "ClockSignal", "ResetSignal", + "Module", + "ClockDomain", + "Elaboratable", "Fragment", "Instance", + "Memory", + "Record", + "DomainRenamer", "ResetInserter", "EnableInserter", +] diff --git a/nmigen/_toolchain/__init__.py b/amaranth/_toolchain/__init__.py similarity index 100% rename from nmigen/_toolchain/__init__.py rename to amaranth/_toolchain/__init__.py diff --git a/nmigen/_toolchain/cxx.py b/amaranth/_toolchain/cxx.py similarity index 97% rename from nmigen/_toolchain/cxx.py rename to amaranth/_toolchain/cxx.py index 439011c..3dc403d 100644 --- a/nmigen/_toolchain/cxx.py +++ b/amaranth/_toolchain/cxx.py @@ -8,7 +8,7 @@ __all__ = ["build_cxx"] def build_cxx(*, cxx_sources, output_name, include_dirs, macros): - build_dir = tempfile.TemporaryDirectory(prefix="nmigen_cxx_") + build_dir = tempfile.TemporaryDirectory(prefix="amaranth_cxx_") cwd = os.getcwd() try: diff --git a/nmigen/_toolchain/yosys.py b/amaranth/_toolchain/yosys.py similarity index 96% rename from nmigen/_toolchain/yosys.py rename to amaranth/_toolchain/yosys.py index c223306..c6acb79 100644 --- a/nmigen/_toolchain/yosys.py +++ b/amaranth/_toolchain/yosys.py @@ -114,7 +114,7 @@ class YosysBinary: class _BuiltinYosys(YosysBinary): - YOSYS_PACKAGE = "nmigen_yosys" + YOSYS_PACKAGE = "amaranth_yosys" @classmethod def available(cls): @@ -205,14 +205,14 @@ def find_yosys(requirement): Raised if required Yosys version is not found. 
""" proxies = [] - clauses = os.environ.get("NMIGEN_USE_YOSYS", "system,builtin").split(",") + clauses = os.environ.get("AMARANTH_USE_YOSYS", "system,builtin").split(",") for clause in clauses: if clause == "builtin": proxies.append(_BuiltinYosys) elif clause == "system": proxies.append(_SystemYosys) else: - raise YosysError("The NMIGEN_USE_YOSYS environment variable contains " + raise YosysError("The AMARANTH_USE_YOSYS environment variable contains " "an unrecognized clause {!r}" .format(clause)) for proxy in proxies: @@ -221,9 +221,9 @@ def find_yosys(requirement): if version is not None and requirement(version): return proxy else: - if "NMIGEN_USE_YOSYS" in os.environ: + if "AMARANTH_USE_YOSYS" in os.environ: raise YosysError("Could not find an acceptable Yosys binary. Searched: {}" .format(", ".join(clauses))) else: - raise YosysError("Could not find an acceptable Yosys binary. The `nmigen-yosys` PyPI " + raise YosysError("Could not find an acceptable Yosys binary. The `amaranth-yosys` PyPI " "package, if available for this platform, can be used as fallback") diff --git a/nmigen/_unused.py b/amaranth/_unused.py similarity index 100% rename from nmigen/_unused.py rename to amaranth/_unused.py diff --git a/nmigen/_utils.py b/amaranth/_utils.py similarity index 91% rename from nmigen/_utils.py rename to amaranth/_utils.py index d04d720..91e0dd4 100644 --- a/nmigen/_utils.py +++ b/amaranth/_utils.py @@ -90,6 +90,11 @@ def get_linter_options(filename): first_line = linecache.getline(filename, 1) if first_line: match = re.match(r"^#\s*nmigen:\s*((?:\w+=\w+\s*)(?:,\s*\w+=\w+\s*)*)\n$", first_line) + if match: + warnings.warn_explicit("Use `# amaranth:` annotation instead of `# nmigen:`", + DeprecationWarning, filename, 1) + else: + match = re.match(r"^#\s*amaranth:\s*((?:\w+=\w+\s*)(?:,\s*\w+=\w+\s*)*)\n$", first_line) if match: return dict(map(lambda s: s.strip().split("=", 2), match.group(1).split(","))) return dict() diff --git a/amaranth/asserts.py 
b/amaranth/asserts.py new file mode 100644 index 0000000..b0e97b9 --- /dev/null +++ b/amaranth/asserts.py @@ -0,0 +1,2 @@ +from .hdl.ast import AnyConst, AnySeq, Assert, Assume, Cover +from .hdl.ast import Past, Stable, Rose, Fell, Initial diff --git a/amaranth/back/__init__.py b/amaranth/back/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/amaranth/back/cxxrtl.py b/amaranth/back/cxxrtl.py new file mode 100644 index 0000000..809a136 --- /dev/null +++ b/amaranth/back/cxxrtl.py @@ -0,0 +1,41 @@ +from .._toolchain.yosys import * +from . import rtlil + + +__all__ = ["YosysError", "convert", "convert_fragment"] + + +def _convert_rtlil_text(rtlil_text, black_boxes, *, src_loc_at=0): + if black_boxes is not None: + if not isinstance(black_boxes, dict): + raise TypeError("CXXRTL black boxes must be a dictionary, not {!r}" + .format(black_boxes)) + for box_name, box_source in black_boxes.items(): + if not isinstance(box_name, str): + raise TypeError("CXXRTL black box name must be a string, not {!r}" + .format(box_name)) + if not isinstance(box_source, str): + raise TypeError("CXXRTL black box source code must be a string, not {!r}" + .format(box_source)) + + yosys = find_yosys(lambda ver: ver >= (0, 9, 3468)) + + script = [] + if black_boxes is not None: + for box_name, box_source in black_boxes.items(): + script.append("read_ilang < 2 ** 16: + raise ImplementationLimit("Wire created at {} is {} bits wide, which is unlikely to " + "synthesize correctly" + .format(src or "unknown location", width)) + + self._attributes(attrs, src=src, indent=1) + name = self._make_name(name, local=False) + if port_id is None: + self._append(" wire width {} {}\n", width, name) + else: + assert port_kind in ("input", "output", "inout") + self._append(" wire width {} {} {} {}\n", width, port_kind, port_id, name) + return name + + def connect(self, lhs, rhs): + self._append(" connect {} {}\n", lhs, rhs) + + def memory(self, width, size, name=None, attrs={}, src=""): + 
self._attributes(attrs, src=src, indent=1) + name = self._make_name(name, local=False) + self._append(" memory width {} size {} {}\n", width, size, name) + return name + + def cell(self, kind, name=None, params={}, ports={}, attrs={}, src=""): + self._attributes(attrs, src=src, indent=1) + name = self._make_name(name, local=False) + self._append(" cell {} {}\n", kind, name) + for param, value in params.items(): + if isinstance(value, float): + self._append(" parameter real \\{} \"{!r}\"\n", + param, value) + elif _signed(value): + self._append(" parameter signed \\{} {}\n", + param, _const(value)) + else: + self._append(" parameter \\{} {}\n", + param, _const(value)) + for port, wire in ports.items(): + self._append(" connect {} {}\n", port, wire) + self._append(" end\n") + return name + + def process(self, name=None, attrs={}, src=""): + name = self._make_name(name, local=True) + return _ProcessBuilder(self, name, attrs, src) + + +class _ProcessBuilder(_BufferedBuilder, _AttrBuilder): + def __init__(self, rtlil, name, attrs, src): + super().__init__() + self.rtlil = rtlil + self.name = name + self.attrs = {} + self.src = src + + def __enter__(self): + self._attributes(self.attrs, src=self.src, indent=1) + self._append(" process {}\n", self.name) + return self + + def __exit__(self, *args): + self._append(" end\n") + self.rtlil._buffer.write(str(self)) + + def case(self): + return _CaseBuilder(self, indent=2) + + def sync(self, kind, cond=None): + return _SyncBuilder(self, kind, cond) + + +class _CaseBuilder(_ProxiedBuilder): + def __init__(self, rtlil, indent): + self.rtlil = rtlil + self.indent = indent + + def __enter__(self): + return self + + def __exit__(self, *args): + pass + + def assign(self, lhs, rhs): + self._append("{}assign {} {}\n", " " * self.indent, lhs, rhs) + + def switch(self, cond, attrs={}, src=""): + return _SwitchBuilder(self.rtlil, cond, attrs, src, self.indent) + + +class _SwitchBuilder(_ProxiedBuilder, _AttrBuilder): + def __init__(self, 
rtlil, cond, attrs, src, indent): + self.rtlil = rtlil + self.cond = cond + self.attrs = attrs + self.src = src + self.indent = indent + + def __enter__(self): + self._attributes(self.attrs, src=self.src, indent=self.indent) + self._append("{}switch {}\n", " " * self.indent, self.cond) + return self + + def __exit__(self, *args): + self._append("{}end\n", " " * self.indent) + + def case(self, *values, attrs={}, src=""): + self._attributes(attrs, src=src, indent=self.indent + 1) + if values == (): + self._append("{}case\n", " " * (self.indent + 1)) + else: + self._append("{}case {}\n", " " * (self.indent + 1), + ", ".join("{}'{}".format(len(value), value) for value in values)) + return _CaseBuilder(self.rtlil, self.indent + 2) + + +class _SyncBuilder(_ProxiedBuilder): + def __init__(self, rtlil, kind, cond): + self.rtlil = rtlil + self.kind = kind + self.cond = cond + + def __enter__(self): + if self.cond is None: + self._append(" sync {}\n", self.kind) + else: + self._append(" sync {} {}\n", self.kind, self.cond) + return self + + def __exit__(self, *args): + pass + + def update(self, lhs, rhs): + self._append(" update {} {}\n", lhs, rhs) + + +def _src(src_loc): + if src_loc is None: + return None + file, line = src_loc + return "{}:{}".format(file, line) + + +class _LegalizeValue(Exception): + def __init__(self, value, branches, src_loc): + self.value = value + self.branches = list(branches) + self.src_loc = src_loc + + +class _ValueCompilerState: + def __init__(self, rtlil): + self.rtlil = rtlil + self.wires = ast.SignalDict() + self.driven = ast.SignalDict() + self.ports = ast.SignalDict() + self.anys = ast.ValueDict() + + self.expansions = ast.ValueDict() + + def add_driven(self, signal, sync): + self.driven[signal] = sync + + def add_port(self, signal, kind): + assert kind in ("i", "o", "io") + if kind == "i": + kind = "input" + elif kind == "o": + kind = "output" + elif kind == "io": + kind = "inout" + self.ports[signal] = (len(self.ports), kind) + + def 
resolve(self, signal, prefix=None): + if len(signal) == 0: + return "{ }", "{ }" + + if signal in self.wires: + return self.wires[signal] + + if signal in self.ports: + port_id, port_kind = self.ports[signal] + else: + port_id = port_kind = None + if prefix is not None: + wire_name = "{}_{}".format(prefix, signal.name) + else: + wire_name = signal.name + + attrs = dict(signal.attrs) + if signal._enum_class is not None: + attrs["enum_base_type"] = signal._enum_class.__name__ + for value in signal._enum_class: + attrs["enum_value_{:0{}b}".format(value.value, signal.width)] = value.name + + wire_curr = self.rtlil.wire(width=signal.width, name=wire_name, + port_id=port_id, port_kind=port_kind, + attrs=attrs, src=_src(signal.src_loc)) + if signal in self.driven and self.driven[signal]: + wire_next = self.rtlil.wire(width=signal.width, name=wire_curr + "$next", + src=_src(signal.src_loc)) + else: + wire_next = None + self.wires[signal] = (wire_curr, wire_next) + + return wire_curr, wire_next + + def resolve_curr(self, signal, prefix=None): + wire_curr, wire_next = self.resolve(signal, prefix) + return wire_curr + + def expand(self, value): + if not self.expansions: + return value + return self.expansions.get(value, value) + + @contextmanager + def expand_to(self, value, expansion): + try: + assert value not in self.expansions + self.expansions[value] = expansion + yield + finally: + del self.expansions[value] + + +class _ValueCompiler(xfrm.ValueVisitor): + def __init__(self, state): + self.s = state + + def on_unknown(self, value): + if value is None: + return None + else: + super().on_unknown(value) + + def on_ClockSignal(self, value): + raise NotImplementedError # :nocov: + + def on_ResetSignal(self, value): + raise NotImplementedError # :nocov: + + def on_Sample(self, value): + raise NotImplementedError # :nocov: + + def on_Initial(self, value): + raise NotImplementedError # :nocov: + + def on_Cat(self, value): + return "{{ {} }}".format(" ".join(reversed([self(o) for 
o in value.parts]))) + + def _prepare_value_for_Slice(self, value): + raise NotImplementedError # :nocov: + + def on_Slice(self, value): + if value.start == 0 and value.stop == len(value.value): + return self(value.value) + + if isinstance(value.value, ast.UserValue): + sigspec = self._prepare_value_for_Slice(value.value._lazy_lower()) + else: + sigspec = self._prepare_value_for_Slice(value.value) + + if value.start == value.stop: + return "{}" + elif value.start + 1 == value.stop: + return "{} [{}]".format(sigspec, value.start) + else: + return "{} [{}:{}]".format(sigspec, value.stop - 1, value.start) + + def on_ArrayProxy(self, value): + index = self.s.expand(value.index) + if isinstance(index, ast.Const): + if index.value < len(value.elems): + elem = value.elems[index.value] + else: + elem = value.elems[-1] + return self.match_shape(elem, *value.shape()) + else: + max_index = 1 << len(value.index) + max_elem = len(value.elems) + raise _LegalizeValue(value.index, range(min(max_index, max_elem)), value.src_loc) + + +class _RHSValueCompiler(_ValueCompiler): + operator_map = { + (1, "~"): "$not", + (1, "-"): "$neg", + (1, "b"): "$reduce_bool", + (1, "r|"): "$reduce_or", + (1, "r&"): "$reduce_and", + (1, "r^"): "$reduce_xor", + (2, "+"): "$add", + (2, "-"): "$sub", + (2, "*"): "$mul", + (2, "//"): "$div", + (2, "%"): "$mod", + (2, "**"): "$pow", + (2, "<<"): "$sshl", + (2, ">>"): "$sshr", + (2, "&"): "$and", + (2, "^"): "$xor", + (2, "|"): "$or", + (2, "=="): "$eq", + (2, "!="): "$ne", + (2, "<"): "$lt", + (2, "<="): "$le", + (2, ">"): "$gt", + (2, ">="): "$ge", + (3, "m"): "$mux", + } + + def on_value(self, value): + return super().on_value(self.s.expand(value)) + + def on_Const(self, value): + return _const(value) + + def on_AnyConst(self, value): + if value in self.s.anys: + return self.s.anys[value] + + res_bits, res_sign = value.shape() + res = self.s.rtlil.wire(width=res_bits, src=_src(value.src_loc)) + self.s.rtlil.cell("$anyconst", ports={ + "\\Y": res, + }, 
params={ + "WIDTH": res_bits, + }, src=_src(value.src_loc)) + self.s.anys[value] = res + return res + + def on_AnySeq(self, value): + if value in self.s.anys: + return self.s.anys[value] + + res_bits, res_sign = value.shape() + res = self.s.rtlil.wire(width=res_bits, src=_src(value.src_loc)) + self.s.rtlil.cell("$anyseq", ports={ + "\\Y": res, + }, params={ + "WIDTH": res_bits, + }, src=_src(value.src_loc)) + self.s.anys[value] = res + return res + + def on_Signal(self, value): + wire_curr, wire_next = self.s.resolve(value) + return wire_curr + + def on_Operator_unary(self, value): + arg, = value.operands + if value.operator in ("u", "s"): + # These operators don't change the bit pattern, only its interpretation. + return self(arg) + + arg_bits, arg_sign = arg.shape() + res_bits, res_sign = value.shape() + res = self.s.rtlil.wire(width=res_bits, src=_src(value.src_loc)) + self.s.rtlil.cell(self.operator_map[(1, value.operator)], ports={ + "\\A": self(arg), + "\\Y": res, + }, params={ + "A_SIGNED": arg_sign, + "A_WIDTH": arg_bits, + "Y_WIDTH": res_bits, + }, src=_src(value.src_loc)) + return res + + def match_shape(self, value, new_bits, new_sign): + if isinstance(value, ast.Const): + return self(ast.Const(value.value, ast.Shape(new_bits, new_sign))) + + value_bits, value_sign = value.shape() + if new_bits <= value_bits: + return self(ast.Slice(value, 0, new_bits)) + + res = self.s.rtlil.wire(width=new_bits, src=_src(value.src_loc)) + self.s.rtlil.cell("$pos", ports={ + "\\A": self(value), + "\\Y": res, + }, params={ + "A_SIGNED": value_sign, + "A_WIDTH": value_bits, + "Y_WIDTH": new_bits, + }, src=_src(value.src_loc)) + return res + + def on_Operator_binary(self, value): + lhs, rhs = value.operands + lhs_bits, lhs_sign = lhs.shape() + rhs_bits, rhs_sign = rhs.shape() + if lhs_sign == rhs_sign or value.operator in ("<<", ">>", "**"): + lhs_wire = self(lhs) + rhs_wire = self(rhs) + else: + lhs_sign = rhs_sign = True + lhs_bits = rhs_bits = max(lhs_bits, rhs_bits) + 
lhs_wire = self.match_shape(lhs, lhs_bits, lhs_sign) + rhs_wire = self.match_shape(rhs, rhs_bits, rhs_sign) + res_bits, res_sign = value.shape() + res = self.s.rtlil.wire(width=res_bits, src=_src(value.src_loc)) + self.s.rtlil.cell(self.operator_map[(2, value.operator)], ports={ + "\\A": lhs_wire, + "\\B": rhs_wire, + "\\Y": res, + }, params={ + "A_SIGNED": lhs_sign, + "A_WIDTH": lhs_bits, + "B_SIGNED": rhs_sign, + "B_WIDTH": rhs_bits, + "Y_WIDTH": res_bits, + }, src=_src(value.src_loc)) + if value.operator in ("//", "%"): + # RTLIL leaves division by zero undefined, but we require it to return zero. + divmod_res = res + res = self.s.rtlil.wire(width=res_bits, src=_src(value.src_loc)) + self.s.rtlil.cell("$mux", ports={ + "\\A": divmod_res, + "\\B": self(ast.Const(0, ast.Shape(res_bits, res_sign))), + "\\S": self(rhs == 0), + "\\Y": res, + }, params={ + "WIDTH": res_bits + }, src=_src(value.src_loc)) + return res + + def on_Operator_mux(self, value): + sel, val1, val0 = value.operands + if len(sel) != 1: + sel = sel.bool() + val1_bits, val1_sign = val1.shape() + val0_bits, val0_sign = val0.shape() + res_bits, res_sign = value.shape() + val1_bits = val0_bits = res_bits = max(val1_bits, val0_bits, res_bits) + val1_wire = self.match_shape(val1, val1_bits, val1_sign) + val0_wire = self.match_shape(val0, val0_bits, val0_sign) + res = self.s.rtlil.wire(width=res_bits, src=_src(value.src_loc)) + self.s.rtlil.cell("$mux", ports={ + "\\A": val0_wire, + "\\B": val1_wire, + "\\S": self(sel), + "\\Y": res, + }, params={ + "WIDTH": res_bits + }, src=_src(value.src_loc)) + return res + + def on_Operator(self, value): + if len(value.operands) == 1: + return self.on_Operator_unary(value) + elif len(value.operands) == 2: + return self.on_Operator_binary(value) + elif len(value.operands) == 3: + assert value.operator == "m" + return self.on_Operator_mux(value) + else: + raise TypeError # :nocov: + + def _prepare_value_for_Slice(self, value): + if isinstance(value, (ast.Signal, 
ast.Slice, ast.Cat)): + sigspec = self(value) + else: + sigspec = self.s.rtlil.wire(len(value), src=_src(value.src_loc)) + self.s.rtlil.connect(sigspec, self(value)) + return sigspec + + def on_Part(self, value): + lhs, rhs = value.value, value.offset + if value.stride != 1: + rhs *= value.stride + lhs_bits, lhs_sign = lhs.shape() + rhs_bits, rhs_sign = rhs.shape() + res_bits, res_sign = value.shape() + res = self.s.rtlil.wire(width=res_bits, src=_src(value.src_loc)) + # Note: Verilog's x[o+:w] construct produces a $shiftx cell, not a $shift cell. + # However, Amaranth's semantics defines the out-of-range bits to be zero, so it is correct + # to use a $shift cell here instead, even though it produces less idiomatic Verilog. + self.s.rtlil.cell("$shift", ports={ + "\\A": self(lhs), + "\\B": self(rhs), + "\\Y": res, + }, params={ + "A_SIGNED": lhs_sign, + "A_WIDTH": lhs_bits, + "B_SIGNED": rhs_sign, + "B_WIDTH": rhs_bits, + "Y_WIDTH": res_bits, + }, src=_src(value.src_loc)) + return res + + def on_Repl(self, value): + return "{{ {} }}".format(" ".join(self(value.value) for _ in range(value.count))) + + +class _LHSValueCompiler(_ValueCompiler): + def on_Const(self, value): + raise TypeError # :nocov: + + def on_AnyConst(self, value): + raise TypeError # :nocov: + + def on_AnySeq(self, value): + raise TypeError # :nocov: + + def on_Operator(self, value): + raise TypeError # :nocov: + + def match_shape(self, value, new_bits, new_sign): + value_bits, value_sign = value.shape() + if new_bits == value_bits: + return self(value) + elif new_bits < value_bits: + return self(ast.Slice(value, 0, new_bits)) + else: # new_bits > value_bits + dummy_bits = new_bits - value_bits + dummy_wire = self.s.rtlil.wire(dummy_bits) + return "{{ {} {} }}".format(dummy_wire, self(value)) + + def on_Signal(self, value): + if value not in self.s.driven: + raise ValueError("No LHS wire for non-driven signal {}".format(repr(value))) + wire_curr, wire_next = self.s.resolve(value) + return wire_next 
or wire_curr + + def _prepare_value_for_Slice(self, value): + assert isinstance(value, (ast.Signal, ast.Slice, ast.Cat)) + return self(value) + + def on_Part(self, value): + offset = self.s.expand(value.offset) + if isinstance(offset, ast.Const): + start = offset.value * value.stride + stop = start + value.width + slice = self(ast.Slice(value.value, start, min(len(value.value), stop))) + if len(value.value) >= stop: + return slice + else: + dummy_wire = self.s.rtlil.wire(stop - len(value.value)) + return "{{ {} {} }}".format(dummy_wire, slice) + else: + # Only so many possible parts. The amount of branches is exponential; if value.offset + # is large (e.g. 32-bit wide), trying to naively legalize it is likely to exhaust + # system resources. + max_branches = len(value.value) // value.stride + 1 + raise _LegalizeValue(value.offset, + range(1 << len(value.offset))[:max_branches], + value.src_loc) + + def on_Repl(self, value): + raise TypeError # :nocov: + + +class _StatementCompiler(xfrm.StatementVisitor): + def __init__(self, state, rhs_compiler, lhs_compiler): + self.state = state + self.rhs_compiler = rhs_compiler + self.lhs_compiler = lhs_compiler + + self._case = None + self._test_cache = {} + self._has_rhs = False + self._wrap_assign = False + + @contextmanager + def case(self, switch, values, attrs={}, src=""): + try: + old_case = self._case + with switch.case(*values, attrs=attrs, src=src) as self._case: + yield + finally: + self._case = old_case + + def _check_rhs(self, value): + if self._has_rhs or next(iter(value._rhs_signals()), None) is not None: + self._has_rhs = True + + def on_Assign(self, stmt): + self._check_rhs(stmt.rhs) + + lhs_bits, lhs_sign = stmt.lhs.shape() + rhs_bits, rhs_sign = stmt.rhs.shape() + if lhs_bits == rhs_bits: + rhs_sigspec = self.rhs_compiler(stmt.rhs) + else: + # In RTLIL, LHS and RHS of assignment must have exactly same width. 
+ rhs_sigspec = self.rhs_compiler.match_shape( + stmt.rhs, lhs_bits, lhs_sign) + if self._wrap_assign: + # In RTLIL, all assigns are logically sequenced before all switches, even if they are + # interleaved in the source. In Amaranth, the source ordering is used. To handle this + # mismatch, we wrap all assigns following a switch in a dummy switch. + with self._case.switch("{ }") as wrap_switch: + with wrap_switch.case() as wrap_case: + wrap_case.assign(self.lhs_compiler(stmt.lhs), rhs_sigspec) + else: + self._case.assign(self.lhs_compiler(stmt.lhs), rhs_sigspec) + + def on_property(self, stmt): + self(stmt._check.eq(stmt.test)) + self(stmt._en.eq(1)) + + en_wire = self.rhs_compiler(stmt._en) + check_wire = self.rhs_compiler(stmt._check) + self.state.rtlil.cell("$" + stmt._kind, ports={ + "\\A": check_wire, + "\\EN": en_wire, + }, src=_src(stmt.src_loc)) + + on_Assert = on_property + on_Assume = on_property + on_Cover = on_property + + def on_Switch(self, stmt): + self._check_rhs(stmt.test) + + if not self.state.expansions: + # We repeatedly translate the same switches over and over (see the LHSGroupAnalyzer + # related code below), and translating the switch test only once helps readability. + if stmt not in self._test_cache: + self._test_cache[stmt] = self.rhs_compiler(stmt.test) + test_sigspec = self._test_cache[stmt] + else: + # However, if the switch test contains an illegal value, then it may not be cached + # (since the illegal value will be repeatedly replaced with different constants), so + # don't cache anything in that case. 
+ test_sigspec = self.rhs_compiler(stmt.test) + + with self._case.switch(test_sigspec, src=_src(stmt.src_loc)) as switch: + for values, stmts in stmt.cases.items(): + case_attrs = {} + if values in stmt.case_src_locs: + case_attrs["src"] = _src(stmt.case_src_locs[values]) + if isinstance(stmt.test, ast.Signal) and stmt.test.decoder: + decoded_values = [] + for value in values: + if "-" in value: + decoded_values.append("") + else: + decoded_values.append(stmt.test.decoder(int(value, 2))) + case_attrs["amaranth.decoding"] = "|".join(decoded_values) + with self.case(switch, values, attrs=case_attrs): + self._wrap_assign = False + self.on_statements(stmts) + self._wrap_assign = True + + def on_statement(self, stmt): + try: + super().on_statement(stmt) + except _LegalizeValue as legalize: + with self._case.switch(self.rhs_compiler(legalize.value), + src=_src(legalize.src_loc)) as switch: + shape = legalize.value.shape() + tests = ["{:0{}b}".format(v, shape.width) for v in legalize.branches] + if tests: + tests[-1] = "-" * shape.width + for branch, test in zip(legalize.branches, tests): + with self.case(switch, (test,)): + self._wrap_assign = False + branch_value = ast.Const(branch, shape) + with self.state.expand_to(legalize.value, branch_value): + self.on_statement(stmt) + self._wrap_assign = True + + def on_statements(self, stmts): + for stmt in stmts: + self.on_statement(stmt) + + +def _convert_fragment(builder, fragment, name_map, hierarchy): + if isinstance(fragment, ir.Instance): + port_map = OrderedDict() + for port_name, (value, dir) in fragment.named_ports.items(): + port_map["\\{}".format(port_name)] = value + + if fragment.type[0] == "$": + return fragment.type, port_map + else: + return "\\{}".format(fragment.type), port_map + + module_name = hierarchy[-1] or "anonymous" + module_attrs = OrderedDict() + if len(hierarchy) == 1: + module_attrs["top"] = 1 + module_attrs["amaranth.hierarchy"] = ".".join(name or "anonymous" for name in hierarchy) + + with 
builder.module(module_name, attrs=module_attrs) as module: + compiler_state = _ValueCompilerState(module) + rhs_compiler = _RHSValueCompiler(compiler_state) + lhs_compiler = _LHSValueCompiler(compiler_state) + stmt_compiler = _StatementCompiler(compiler_state, rhs_compiler, lhs_compiler) + + verilog_trigger = None + verilog_trigger_sync_emitted = False + + # If the fragment is completely empty, add a dummy wire to it, or Yosys will interpret + # it as a black box by default (when read as Verilog). + if not fragment.ports and not fragment.statements and not fragment.subfragments: + module.wire(1, name="$empty_module_filler") + + # Register all signals driven in the current fragment. This must be done first, as it + # affects further codegen; e.g. whether \sig$next signals will be generated and used. + for domain, signal in fragment.iter_drivers(): + compiler_state.add_driven(signal, sync=domain is not None) + + # Transform all signals used as ports in the current fragment eagerly and outside of + # any hierarchy, to make sure they get sensible (non-prefixed) names. + for signal in fragment.ports: + compiler_state.add_port(signal, fragment.ports[signal]) + compiler_state.resolve_curr(signal) + + # Transform all clocks and resets eagerly and outside of any hierarchy, to make + # sure they get sensible (non-prefixed) names. This does not affect semantics. + for domain, _ in fragment.iter_sync(): + cd = fragment.domains[domain] + compiler_state.resolve_curr(cd.clk) + if cd.rst is not None: + compiler_state.resolve_curr(cd.rst) + + # Transform all subfragments to their respective cells. Transforming signals connected + # to their ports into wires eagerly makes sure they get sensible (prefixed with submodule + # name) names.
+ memories = OrderedDict() + for subfragment, sub_name in fragment.subfragments: + if sub_name is None: + sub_name = module.anonymous() + + sub_params = OrderedDict() + if hasattr(subfragment, "parameters"): + for param_name, param_value in subfragment.parameters.items(): + if isinstance(param_value, mem.Memory): + memory = param_value + if memory not in memories: + memories[memory] = module.memory(width=memory.width, size=memory.depth, + name=memory.name, attrs=memory.attrs) + addr_bits = bits_for(memory.depth) + data_parts = [] + data_mask = (1 << memory.width) - 1 + for addr in range(memory.depth): + if addr < len(memory.init): + data = memory.init[addr] & data_mask + else: + data = 0 + data_parts.append("{:0{}b}".format(data, memory.width)) + module.cell("$meminit", ports={ + "\\ADDR": rhs_compiler(ast.Const(0, addr_bits)), + "\\DATA": "{}'".format(memory.width * memory.depth) + + "".join(reversed(data_parts)), + }, params={ + "MEMID": memories[memory], + "ABITS": addr_bits, + "WIDTH": memory.width, + "WORDS": memory.depth, + "PRIORITY": 0, + }) + + param_value = memories[memory] + + sub_params[param_name] = param_value + + sub_type, sub_port_map = \ + _convert_fragment(builder, subfragment, name_map, + hierarchy=hierarchy + (sub_name,)) + + sub_ports = OrderedDict() + for port, value in sub_port_map.items(): + if not isinstance(subfragment, ir.Instance): + for signal in value._rhs_signals(): + compiler_state.resolve_curr(signal, prefix=sub_name) + if len(value) > 0: + sub_ports[port] = rhs_compiler(value) + + module.cell(sub_type, name=sub_name, ports=sub_ports, params=sub_params, + attrs=subfragment.attrs) + + # If we emit all of our combinatorial logic into a single RTLIL process, Verilog + # simulators will break horribly, because Yosys write_verilog transforms RTLIL processes + # into always @* blocks with blocking assignment, and that does not create delta cycles. 
+ # + # Therefore, we translate the fragment as many times as there are independent groups + # of signals (a group is a transitive closure of signals that appear together on LHS), + # splitting them into many RTLIL (and thus Verilog) processes. + lhs_grouper = xfrm.LHSGroupAnalyzer() + lhs_grouper.on_statements(fragment.statements) + + for group, group_signals in lhs_grouper.groups().items(): + lhs_group_filter = xfrm.LHSGroupFilter(group_signals) + group_stmts = lhs_group_filter(fragment.statements) + + with module.process(name="$group_{}".format(group)) as process: + with process.case() as case: + # For every signal in comb domain, assign \sig$next to the reset value. + # For every signal in sync domains, assign \sig$next to the current + # value (\sig). + for domain, signal in fragment.iter_drivers(): + if signal not in group_signals: + continue + if domain is None: + prev_value = ast.Const(signal.reset, signal.width) + else: + prev_value = signal + case.assign(lhs_compiler(signal), rhs_compiler(prev_value)) + + # Convert statements into decision trees. + stmt_compiler._case = case + stmt_compiler._has_rhs = False + stmt_compiler._wrap_assign = False + stmt_compiler(group_stmts) + + # Verilog `always @*` blocks will not run if `*` does not match anything, i.e. + # if the implicit sensitivity list is empty. We check this while translating, + # by looking for any signals on RHS. If there aren't any, we add some logic + # whose only purpose is to trigger Verilog simulators when it converts + # through RTLIL and to Verilog, by populating the sensitivity list. + # + # Unfortunately, while this workaround allows true (event-driven) Verilog + # simulators to work properly, and is universally ignored by synthesizers, + # Verilator rejects it. + # + # Yosys >=0.9+3468 emits a better workaround on its own, so this code can be + # removed completely once support for Yosys 0.9 is dropped. 
+ if not stmt_compiler._has_rhs: + if verilog_trigger is None: + verilog_trigger = \ + module.wire(1, name="$verilog_initial_trigger") + case.assign(verilog_trigger, verilog_trigger) + + # For every signal in the sync domain, assign \sig's initial value (which will + # end up as the \init reg attribute) to the reset value. + with process.sync("init") as sync: + for domain, signal in fragment.iter_sync(): + if signal not in group_signals: + continue + wire_curr, wire_next = compiler_state.resolve(signal) + sync.update(wire_curr, rhs_compiler(ast.Const(signal.reset, signal.width))) + + # The Verilog simulator trigger needs to change at time 0, so if we haven't + # yet done that in some process, do it. + if verilog_trigger and not verilog_trigger_sync_emitted: + sync.update(verilog_trigger, "1'0") + verilog_trigger_sync_emitted = True + + # For every signal in every sync domain, assign \sig to \sig$next. The sensitivity + # list, however, differs between domains: for domains with sync reset, it is + # `[pos|neg]edge clk`, for sync domains with async reset it is `[pos|neg]edge clk + # or posedge rst`. + for domain, signals in fragment.drivers.items(): + if domain is None: + continue + + signals = signals & group_signals + if not signals: + continue + + cd = fragment.domains[domain] + + triggers = [] + triggers.append((cd.clk_edge + "edge", compiler_state.resolve_curr(cd.clk))) + if cd.async_reset: + triggers.append(("posedge", compiler_state.resolve_curr(cd.rst))) + + for trigger in triggers: + with process.sync(*trigger) as sync: + for signal in signals: + wire_curr, wire_next = compiler_state.resolve(signal) + sync.update(wire_curr, wire_next) + + # Any signals that are used but neither driven nor connected to an input port always + # assume their reset values. We need to assign the reset value explicitly, since only + # driven sync signals are handled by the logic above. 
+ # + # Because this assignment is done at a late stage, a single Signal object can get assigned + # many times, once in each module it is used. This is a deliberate decision; the possible + # alternatives are to add ports for undriven signals (which requires choosing one module + # to drive it to reset value arbitrarily) or to replace them with their reset value (which + # removes valuable source location information). + driven = ast.SignalSet() + for domain, signals in fragment.iter_drivers(): + driven.update(flatten(signal._lhs_signals() for signal in signals)) + driven.update(fragment.iter_ports(dir="i")) + driven.update(fragment.iter_ports(dir="io")) + for subfragment, sub_name in fragment.subfragments: + driven.update(subfragment.iter_ports(dir="o")) + driven.update(subfragment.iter_ports(dir="io")) + + for wire in compiler_state.wires: + if wire in driven: + continue + wire_curr, _ = compiler_state.wires[wire] + module.connect(wire_curr, rhs_compiler(ast.Const(wire.reset, wire.width))) + + # Collect the names we've given to our ports in RTLIL, and correlate these with the signals + # represented by these ports. If we are a submodule, this will be necessary to create a cell + # for us in the parent module. + port_map = OrderedDict() + for signal in fragment.ports: + port_map[compiler_state.resolve_curr(signal)] = signal + + # Finally, collect the names we've given to each wire in RTLIL, and provide these to
+ for signal in compiler_state.wires: + wire_name = compiler_state.resolve_curr(signal) + if wire_name.startswith("\\"): + wire_name = wire_name[1:] + name_map[signal] = hierarchy + (wire_name,) + + return module.name, port_map + + +def convert_fragment(fragment, name="top"): + assert isinstance(fragment, ir.Fragment) + builder = _Builder() + name_map = ast.SignalDict() + _convert_fragment(builder, fragment, name_map, hierarchy=(name,)) + return str(builder), name_map + + +def convert(elaboratable, name="top", platform=None, **kwargs): + fragment = ir.Fragment.get(elaboratable, platform).prepare(**kwargs) + il_text, name_map = convert_fragment(fragment, name) + return il_text diff --git a/amaranth/back/verilog.py b/amaranth/back/verilog.py new file mode 100644 index 0000000..423e67c --- /dev/null +++ b/amaranth/back/verilog.py @@ -0,0 +1,61 @@ +from .._toolchain.yosys import * +from . import rtlil + + +__all__ = ["YosysError", "convert", "convert_fragment"] + + +def _convert_rtlil_text(rtlil_text, *, strip_internal_attrs=False, write_verilog_opts=()): + # this version requirement needs to be synchronized with the one in setup.py! + yosys = find_yosys(lambda ver: ver >= (0, 9)) + yosys_version = yosys.version() + + script = [] + script.append("read_ilang <<rtlil\n{}\nrtlil".format(rtlil_text)) + + if yosys_version >= (0, 9, 3468): + # Yosys >=0.9+3468 (since commit 128522f1) emits the workaround for the `always @*` + # initial scheduling issue on its own. + script.append("delete w:$verilog_initial_trigger") + + if yosys_version >= (0, 9, 3527): + # Yosys >=0.9+3527 (since commit 656ee70f) supports the `-nomux` option for the `proc` + # script pass. Because the individual `proc_*` passes are not a stable interface, + # `proc -nomux` is used instead, if available. + script.append("proc -nomux") + else: + # On earlier versions, use individual `proc_*` passes; this is a known range of Yosys + # versions and we know it's compatible with what Amaranth does.
+ script.append("proc_init") + script.append("proc_arst") + script.append("proc_dff") + script.append("proc_clean") + script.append("memory_collect") + + if strip_internal_attrs: + attr_map = [] + attr_map.append("-remove generator") + attr_map.append("-remove top") + attr_map.append("-remove src") + attr_map.append("-remove amaranth.hierarchy") + attr_map.append("-remove amaranth.decoding") + script.append("attrmap {}".format(" ".join(attr_map))) + script.append("attrmap -modattr {}".format(" ".join(attr_map))) + + script.append("write_verilog -norename {}".format(" ".join(write_verilog_opts))) + + return yosys.run(["-q", "-"], "\n".join(script), + # At the moment, Yosys always shows a warning indicating that not all processes can be + # translated to Verilog. We carefully emit only the processes that *can* be translated, and + # squash this warning. Once Yosys' write_verilog pass is fixed, we should remove this. + ignore_warnings=True) + + +def convert_fragment(*args, strip_internal_attrs=False, **kwargs): + rtlil_text, name_map = rtlil.convert_fragment(*args, **kwargs) + return _convert_rtlil_text(rtlil_text, strip_internal_attrs=strip_internal_attrs), name_map + + +def convert(*args, strip_internal_attrs=False, **kwargs): + rtlil_text = rtlil.convert(*args, **kwargs) + return _convert_rtlil_text(rtlil_text, strip_internal_attrs=strip_internal_attrs) diff --git a/amaranth/build/__init__.py b/amaranth/build/__init__.py new file mode 100644 index 0000000..c4bc9f3 --- /dev/null +++ b/amaranth/build/__init__.py @@ -0,0 +1,3 @@ +from .dsl import * +from .res import ResourceError +from .plat import * diff --git a/amaranth/build/dsl.py b/amaranth/build/dsl.py new file mode 100644 index 0000000..3b445f6 --- /dev/null +++ b/amaranth/build/dsl.py @@ -0,0 +1,256 @@ +from collections import OrderedDict + + +__all__ = ["Pins", "PinsN", "DiffPairs", "DiffPairsN", + "Attrs", "Clock", "Subsignal", "Resource", "Connector"] + + +class Pins: + def __init__(self, names, *, 
dir="io", invert=False, conn=None, assert_width=None): + if not isinstance(names, str): + raise TypeError("Names must be a whitespace-separated string, not {!r}" + .format(names)) + names = names.split() + + if conn is not None: + conn_name, conn_number = conn + if not (isinstance(conn_name, str) and isinstance(conn_number, (int, str))): + raise TypeError("Connector must be None or a pair of string (connector name) and " + "integer/string (connector number), not {!r}" + .format(conn)) + names = ["{}_{}:{}".format(conn_name, conn_number, name) for name in names] + + if dir not in ("i", "o", "io", "oe"): + raise TypeError("Direction must be one of \"i\", \"o\", \"oe\", or \"io\", not {!r}" + .format(dir)) + + if assert_width is not None and len(names) != assert_width: + raise AssertionError("{} names are specified ({}), but {} names are expected" + .format(len(names), " ".join(names), assert_width)) + + self.names = names + self.dir = dir + self.invert = bool(invert) + + def __len__(self): + return len(self.names) + + def __iter__(self): + return iter(self.names) + + def map_names(self, mapping, resource): + mapped_names = [] + for name in self.names: + while ":" in name: + if name not in mapping: + raise NameError("Resource {!r} refers to nonexistent connector pin {}" + .format(resource, name)) + name = mapping[name] + mapped_names.append(name) + return mapped_names + + def __repr__(self): + return "(pins{} {} {})".format("-n" if self.invert else "", + self.dir, " ".join(self.names)) + + +def PinsN(*args, **kwargs): + return Pins(*args, invert=True, **kwargs) + + +class DiffPairs: + def __init__(self, p, n, *, dir="io", invert=False, conn=None, assert_width=None): + self.p = Pins(p, dir=dir, conn=conn, assert_width=assert_width) + self.n = Pins(n, dir=dir, conn=conn, assert_width=assert_width) + + if len(self.p.names) != len(self.n.names): + raise TypeError("Positive and negative pins must have the same width, but {!r} " + "and {!r} do not" + .format(self.p, 
self.n)) + + self.dir = dir + self.invert = bool(invert) + + def __len__(self): + return len(self.p.names) + + def __iter__(self): + return zip(self.p.names, self.n.names) + + def __repr__(self): + return "(diffpairs{} {} (p {}) (n {}))".format("-n" if self.invert else "", + self.dir, " ".join(self.p.names), " ".join(self.n.names)) + + +def DiffPairsN(*args, **kwargs): + return DiffPairs(*args, invert=True, **kwargs) + + +class Attrs(OrderedDict): + def __init__(self, **attrs): + for key, value in attrs.items(): + if not (value is None or isinstance(value, (str, int)) or hasattr(value, "__call__")): + raise TypeError("Value of attribute {} must be None, int, str, or callable, " + "not {!r}" + .format(key, value)) + + super().__init__(**attrs) + + def __repr__(self): + items = [] + for key, value in self.items(): + if value is None: + items.append("!" + key) + else: + items.append(key + "=" + repr(value)) + return "(attrs {})".format(" ".join(items)) + + +class Clock: + def __init__(self, frequency): + if not isinstance(frequency, (float, int)): + raise TypeError("Clock frequency must be a number") + + self.frequency = float(frequency) + + @property + def period(self): + return 1 / self.frequency + + def __repr__(self): + return "(clock {})".format(self.frequency) + + +class Subsignal: + def __init__(self, name, *args): + self.name = name + self.ios = [] + self.attrs = Attrs() + self.clock = None + + if not args: + raise ValueError("Missing I/O constraints") + for arg in args: + if isinstance(arg, (Pins, DiffPairs)): + if not self.ios: + self.ios.append(arg) + else: + raise TypeError("Pins and DiffPairs are incompatible with other location or " + "subsignal constraints, but {!r} appears after {!r}" + .format(arg, self.ios[-1])) + elif isinstance(arg, Subsignal): + if not self.ios or isinstance(self.ios[-1], Subsignal): + self.ios.append(arg) + else: + raise TypeError("Subsignal is incompatible with location constraints, but " + "{!r} appears after {!r}" + 
.format(arg, self.ios[-1])) + elif isinstance(arg, Attrs): + self.attrs.update(arg) + elif isinstance(arg, Clock): + if self.ios and isinstance(self.ios[-1], (Pins, DiffPairs)): + if self.clock is None: + self.clock = arg + else: + raise ValueError("Clock constraint can be applied only once") + else: + raise TypeError("Clock constraint can only be applied to Pins or DiffPairs, " + "not {!r}" + .format(self.ios[-1])) + else: + raise TypeError("Constraint must be one of Pins, DiffPairs, Subsignal, Attrs, " + "or Clock, not {!r}" + .format(arg)) + + def _content_repr(self): + parts = [] + for io in self.ios: + parts.append(repr(io)) + if self.clock is not None: + parts.append(repr(self.clock)) + if self.attrs: + parts.append(repr(self.attrs)) + return " ".join(parts) + + def __repr__(self): + return "(subsignal {} {})".format(self.name, self._content_repr()) + + +class Resource(Subsignal): + @classmethod + def family(cls, name_or_number, number=None, *, ios, default_name, name_suffix=""): + # This constructor accepts two different forms: + # 1. Number-only form: + # Resource.family(0, default_name="name", ios=[Pins("A0 A1")]) + # 2. Name-and-number (name override) form: + # Resource.family("override", 0, default_name="name", ios=...) + # This makes it easier to build abstractions for resources, e.g. an SPIResource abstraction + # could simply delegate to `Resource.family(*args, default_name="spi", ios=ios)`. + # The name_suffix argument is meant to support creating resources with + # similar names, such as spi_flash, spi_flash_2x, etc. + if name_suffix: # Only add "_" if we actually have a suffix. 
+ name_suffix = "_" + name_suffix + + if number is None: # name_or_number is number + return cls(default_name + name_suffix, name_or_number, *ios) + else: # name_or_number is name + return cls(name_or_number + name_suffix, number, *ios) + + def __init__(self, name, number, *args): + super().__init__(name, *args) + + self.number = number + + def __repr__(self): + return "(resource {} {} {})".format(self.name, self.number, self._content_repr()) + + +class Connector: + def __init__(self, name, number, io, *, conn=None): + self.name = name + self.number = number + mapping = OrderedDict() + + if isinstance(io, dict): + for conn_pin, plat_pin in io.items(): + if not isinstance(conn_pin, str): + raise TypeError("Connector pin name must be a string, not {!r}" + .format(conn_pin)) + if not isinstance(plat_pin, str): + raise TypeError("Platform pin name must be a string, not {!r}" + .format(plat_pin)) + mapping[conn_pin] = plat_pin + + elif isinstance(io, str): + for conn_pin, plat_pin in enumerate(io.split(), start=1): + if plat_pin == "-": + continue + + mapping[str(conn_pin)] = plat_pin + else: + raise TypeError("Connector I/Os must be a dictionary or a string, not {!r}" + .format(io)) + + if conn is not None: + conn_name, conn_number = conn + if not (isinstance(conn_name, str) and isinstance(conn_number, (int, str))): + raise TypeError("Connector must be None or a pair of string (connector name) and " + "integer/string (connector number), not {!r}" + .format(conn)) + + for conn_pin, plat_pin in mapping.items(): + mapping[conn_pin] = "{}_{}:{}".format(conn_name, conn_number, plat_pin) + + self.mapping = mapping + + def __repr__(self): + return "(connector {} {} {})".format(self.name, self.number, + " ".join("{}=>{}".format(conn, plat) + for conn, plat in self.mapping.items())) + + def __len__(self): + return len(self.mapping) + + def __iter__(self): + for conn_pin, plat_pin in self.mapping.items(): + yield "{}_{}:{}".format(self.name, self.number, conn_pin), plat_pin diff 
--git a/amaranth/build/plat.py b/amaranth/build/plat.py new file mode 100644 index 0000000..49d69ac --- /dev/null +++ b/amaranth/build/plat.py @@ -0,0 +1,457 @@ +from collections import OrderedDict +from abc import ABCMeta, abstractmethod, abstractproperty +import os +import textwrap +import re +import jinja2 + +from .. import __version__ +from .._toolchain import * +from ..hdl import * +from ..hdl.xfrm import SampleLowerer, DomainLowerer +from ..lib.cdc import ResetSynchronizer +from ..back import rtlil, verilog +from .res import * +from .run import * + + +__all__ = ["Platform", "TemplatedPlatform"] + + +class Platform(ResourceManager, metaclass=ABCMeta): + resources = abstractproperty() + connectors = abstractproperty() + default_clk = None + default_rst = None + required_tools = abstractproperty() + + def __init__(self): + super().__init__(self.resources, self.connectors) + + self.extra_files = OrderedDict() + + self._prepared = False + + @property + def default_clk_constraint(self): + if self.default_clk is None: + raise AttributeError("Platform '{}' does not define a default clock" + .format(type(self).__name__)) + return self.lookup(self.default_clk).clock + + @property + def default_clk_frequency(self): + constraint = self.default_clk_constraint + if constraint is None: + raise AttributeError("Platform '{}' does not constrain its default clock" + .format(type(self).__name__)) + return constraint.frequency + + def add_file(self, filename, content): + if not isinstance(filename, str): + raise TypeError("File name must be a string, not {!r}" + .format(filename)) + if hasattr(content, "read"): + content = content.read() + elif not isinstance(content, (str, bytes)): + raise TypeError("File contents must be str, bytes, or a file-like object, not {!r}" + .format(content)) + if filename in self.extra_files: + if self.extra_files[filename] != content: + raise ValueError("File {!r} already exists" + .format(filename)) + else: + self.extra_files[filename] = content + + 
def iter_files(self, *suffixes): + for filename in self.extra_files: + if filename.endswith(suffixes): + yield filename + + @property + def _deprecated_toolchain_env_var(self): + return f"NMIGEN_ENV_{self.toolchain}" + + @property + def _toolchain_env_var(self): + return f"AMARANTH_ENV_{self.toolchain}" + + def build(self, elaboratable, name="top", + build_dir="build", do_build=True, + program_opts=None, do_program=False, + **kwargs): + # The following code performs a best-effort check for presence of required tools upfront, + # before performing any build actions, to provide a better diagnostic. It does not handle + # several corner cases: + # 1. `require_tool` does not source toolchain environment scripts, so if such a script + # is used, the check is skipped, and `execute_local()` may fail; + # 2. if the design is not built (do_build=False), most of the tools are not required and + # in fact might not be available if the design will be built manually with a different + # environment script specified, or on a different machine; however, Yosys is required + # by virtually every platform anyway, to provide debug Verilog output, and `prepare()` + # may fail. + # This is OK because even if `require_tool` succeeds, the toolchain might be broken anyway. + # The check only serves to catch common errors earlier. 
+ if do_build and (self._deprecated_toolchain_env_var not in os.environ or + self._toolchain_env_var not in os.environ): + for tool in self.required_tools: + require_tool(tool) + + plan = self.prepare(elaboratable, name, **kwargs) + if not do_build: + return plan + + products = plan.execute_local(build_dir) + if not do_program: + return products + + self.toolchain_program(products, name, **(program_opts or {})) + + def has_required_tools(self): + if (self._deprecated_toolchain_env_var in os.environ or + self._toolchain_env_var in os.environ): + return True + return all(has_tool(name) for name in self.required_tools) + + def create_missing_domain(self, name): + # Simple instantiation of a clock domain driven directly by the board clock and reset. + # This implementation uses a single ResetSynchronizer to ensure that: + # * an external reset is definitely synchronized to the system clock; + # * release of power-on reset, which is inherently asynchronous, is synchronized to + # the system clock. + # Many device families provide advanced primitives for tackling reset. If these exist, + # they should be used instead. 
+ if name == "sync" and self.default_clk is not None: + clk_i = self.request(self.default_clk).i + if self.default_rst is not None: + rst_i = self.request(self.default_rst).i + else: + rst_i = Const(0) + + m = Module() + m.domains += ClockDomain("sync") + m.d.comb += ClockSignal("sync").eq(clk_i) + m.submodules.reset_sync = ResetSynchronizer(rst_i, domain="sync") + return m + + def prepare(self, elaboratable, name="top", **kwargs): + assert not self._prepared + self._prepared = True + + fragment = Fragment.get(elaboratable, self) + fragment = SampleLowerer()(fragment) + fragment._propagate_domains(self.create_missing_domain, platform=self) + fragment = DomainLowerer()(fragment) + + def add_pin_fragment(pin, pin_fragment): + pin_fragment = Fragment.get(pin_fragment, self) + if not isinstance(pin_fragment, Instance): + pin_fragment.flatten = True + fragment.add_subfragment(pin_fragment, name="pin_{}".format(pin.name)) + + for pin, port, attrs, invert in self.iter_single_ended_pins(): + if pin.dir == "i": + add_pin_fragment(pin, self.get_input(pin, port, attrs, invert)) + if pin.dir == "o": + add_pin_fragment(pin, self.get_output(pin, port, attrs, invert)) + if pin.dir == "oe": + add_pin_fragment(pin, self.get_tristate(pin, port, attrs, invert)) + if pin.dir == "io": + add_pin_fragment(pin, self.get_input_output(pin, port, attrs, invert)) + + for pin, port, attrs, invert in self.iter_differential_pins(): + if pin.dir == "i": + add_pin_fragment(pin, self.get_diff_input(pin, port, attrs, invert)) + if pin.dir == "o": + add_pin_fragment(pin, self.get_diff_output(pin, port, attrs, invert)) + if pin.dir == "oe": + add_pin_fragment(pin, self.get_diff_tristate(pin, port, attrs, invert)) + if pin.dir == "io": + add_pin_fragment(pin, self.get_diff_input_output(pin, port, attrs, invert)) + + fragment._propagate_ports(ports=self.iter_ports(), all_undef_as_ports=False) + return self.toolchain_prepare(fragment, name, **kwargs) + + @abstractmethod + def toolchain_prepare(self, 
fragment, name, **kwargs): + """ + Convert the ``fragment`` and constraints recorded in this :class:`Platform` into + a :class:`BuildPlan`. + """ + raise NotImplementedError # :nocov: + + def toolchain_program(self, products, name, **kwargs): + """ + Extract bitstream for fragment ``name`` from ``products`` and download it to a target. + """ + raise NotImplementedError("Platform '{}' does not support programming" + .format(type(self).__name__)) + + def _check_feature(self, feature, pin, attrs, valid_xdrs, valid_attrs): + if len(valid_xdrs) == 0: + raise NotImplementedError("Platform '{}' does not support {}" + .format(type(self).__name__, feature)) + elif pin.xdr not in valid_xdrs: + raise NotImplementedError("Platform '{}' does not support {} for XDR {}" + .format(type(self).__name__, feature, pin.xdr)) + + if not valid_attrs and attrs: + raise NotImplementedError("Platform '{}' does not support attributes for {}" + .format(type(self).__name__, feature)) + + @staticmethod + def _invert_if(invert, value): + if invert: + return ~value + else: + return value + + def get_input(self, pin, port, attrs, invert): + self._check_feature("single-ended input", pin, attrs, + valid_xdrs=(0,), valid_attrs=None) + + m = Module() + m.d.comb += pin.i.eq(self._invert_if(invert, port)) + return m + + def get_output(self, pin, port, attrs, invert): + self._check_feature("single-ended output", pin, attrs, + valid_xdrs=(0,), valid_attrs=None) + + m = Module() + m.d.comb += port.eq(self._invert_if(invert, pin.o)) + return m + + def get_tristate(self, pin, port, attrs, invert): + self._check_feature("single-ended tristate", pin, attrs, + valid_xdrs=(0,), valid_attrs=None) + + m = Module() + m.submodules += Instance("$tribuf", + p_WIDTH=pin.width, + i_EN=pin.oe, + i_A=self._invert_if(invert, pin.o), + o_Y=port, + ) + return m + + def get_input_output(self, pin, port, attrs, invert): + self._check_feature("single-ended input/output", pin, attrs, + valid_xdrs=(0,), valid_attrs=None) + + m = 
Module() + m.submodules += Instance("$tribuf", + p_WIDTH=pin.width, + i_EN=pin.oe, + i_A=self._invert_if(invert, pin.o), + o_Y=port, + ) + m.d.comb += pin.i.eq(self._invert_if(invert, port)) + return m + + def get_diff_input(self, pin, port, attrs, invert): + self._check_feature("differential input", pin, attrs, + valid_xdrs=(), valid_attrs=None) + + def get_diff_output(self, pin, port, attrs, invert): + self._check_feature("differential output", pin, attrs, + valid_xdrs=(), valid_attrs=None) + + def get_diff_tristate(self, pin, port, attrs, invert): + self._check_feature("differential tristate", pin, attrs, + valid_xdrs=(), valid_attrs=None) + + def get_diff_input_output(self, pin, port, attrs, invert): + self._check_feature("differential input/output", pin, attrs, + valid_xdrs=(), valid_attrs=None) + + +class TemplatedPlatform(Platform): + toolchain = abstractproperty() + file_templates = abstractproperty() + command_templates = abstractproperty() + + build_script_templates = { + "build_{{name}}.sh": """ + # {{autogenerated}} + set -e{{verbose("x")}} + [ -n "${{platform._deprecated_toolchain_env_var}}" ] && . "${{platform._deprecated_toolchain_env_var}}" + [ -n "${{platform._toolchain_env_var}}" ] && . "${{platform._toolchain_env_var}}" + {{emit_commands("sh")}} + """, + "build_{{name}}.bat": """ + @rem {{autogenerated}} + {{quiet("@echo off")}} + if defined {{platform._deprecated_toolchain_env_var}} call %{{platform._deprecated_toolchain_env_var}}% + if defined {{platform._toolchain_env_var}} call %{{platform._toolchain_env_var}}% + {{emit_commands("bat")}} + """, + } + + def iter_clock_constraints(self): + for net_signal, port_signal, frequency in super().iter_clock_constraints(): + # Skip any clock constraints placed on signals that are never used in the design. + # Otherwise, it will cause a crash in the vendor platform if it supports clock + # constraints on non-port nets. 
+ if net_signal not in self._name_map: + continue + yield net_signal, port_signal, frequency + + def toolchain_prepare(self, fragment, name, **kwargs): + # Restrict the name of the design to a strict alphanumeric character set. Platforms will + # interpolate the name of the design in many different contexts: filesystem paths, Python + # scripts, Tcl scripts, ad-hoc constraint files, and so on. It is not practical to add + # escaping code that handles every one of their edge cases, so make sure we never hit them + # in the first place. + invalid_char = re.match(r"[^A-Za-z0-9_]", name) + if invalid_char: + raise ValueError("Design name {!r} contains invalid character {!r}; only alphanumeric " + "characters are valid in design names" + .format(name, invalid_char.group(0))) + + # This notice serves a dual purpose: to explain that the file is autogenerated, + # and to incorporate the Amaranth version into generated code. + autogenerated = "Automatically generated by Amaranth {}. Do not edit.".format(__version__) + + rtlil_text, self._name_map = rtlil.convert_fragment(fragment, name=name) + + def emit_rtlil(): + return rtlil_text + + def emit_verilog(opts=()): + return verilog._convert_rtlil_text(rtlil_text, + strip_internal_attrs=True, write_verilog_opts=opts) + + def emit_debug_verilog(opts=()): + return verilog._convert_rtlil_text(rtlil_text, + strip_internal_attrs=False, write_verilog_opts=opts) + + def emit_commands(syntax): + commands = [] + + for name in self.required_tools: + env_var = tool_env_var(name) + if syntax == "sh": + template = ": ${{{env_var}:={name}}}" + elif syntax == "bat": + template = \ + "if [%{env_var}%] equ [\"\"] set {env_var}=\n" \ + "if [%{env_var}%] equ [] set {env_var}={name}" + else: + assert False + commands.append(template.format(env_var=env_var, name=name)) + + for index, command_tpl in enumerate(self.command_templates): + command = render(command_tpl, origin="".format(index + 1), + syntax=syntax) + command = re.sub(r"\s+", " ", 
command) + if syntax == "sh": + commands.append(command) + elif syntax == "bat": + commands.append(command + " || exit /b") + else: + assert False + + return "\n".join(commands) + + def get_override(var): + deprecated_var_env = "NMIGEN_{}".format(var) + var_env = "AMARANTH_{}".format(var) + if deprecated_var_env in os.environ or var_env in os.environ: + # On Windows, there is no way to define an "empty but set" variable; it is tempting + # to use a quoted empty string, but it doesn't do what one would expect. Recognize + # this as a useful pattern anyway, and treat `set VAR=""` on Windows the same way + # `export VAR=` is treated on Linux. + if var_env in os.environ: + var_env_value = os.environ[var_env] + elif deprecated_var_env in os.environ: + var_env_value = os.environ[deprecated_var_env] + return re.sub(r'^\"\"$', "", var_env_value) + elif var in kwargs: + if isinstance(kwargs[var], str): + return textwrap.dedent(kwargs[var]).strip() + else: + return kwargs[var] + else: + return jinja2.Undefined(name=var) + + @jinja2.contextfunction + def invoke_tool(context, name): + env_var = tool_env_var(name) + if context.parent["syntax"] == "sh": + return "\"${}\"".format(env_var) + elif context.parent["syntax"] == "bat": + return "%{}%".format(env_var) + else: + assert False + + def options(opts): + if isinstance(opts, str): + return opts + else: + return " ".join(opts) + + def hierarchy(signal, separator): + return separator.join(self._name_map[signal][1:]) + + def ascii_escape(string): + def escape_one(match): + if match.group(1) is None: + return match.group(2) + else: + return "_{:02x}_".format(ord(match.group(1)[0])) + return "".join(escape_one(m) for m in re.finditer(r"([^A-Za-z0-9_])|(.)", string)) + + def tcl_escape(string): + return "{" + re.sub(r"([{}\\])", r"\\\1", string) + "}" + + def tcl_quote(string): + return '"' + re.sub(r"([$[\\])", r"\\\1", string) + '"' + + def verbose(arg): + if get_override("verbose"): + return arg + else: + return 
jinja2.Undefined(name="quiet") + + def quiet(arg): + if get_override("verbose"): + return jinja2.Undefined(name="quiet") + else: + return arg + + def render(source, origin, syntax=None): + try: + source = textwrap.dedent(source).strip() + compiled = jinja2.Template(source, + trim_blocks=True, lstrip_blocks=True, undefined=jinja2.StrictUndefined) + compiled.environment.filters["options"] = options + compiled.environment.filters["hierarchy"] = hierarchy + compiled.environment.filters["ascii_escape"] = ascii_escape + compiled.environment.filters["tcl_escape"] = tcl_escape + compiled.environment.filters["tcl_quote"] = tcl_quote + except jinja2.TemplateSyntaxError as e: + e.args = ("{} (at {}:{})".format(e.message, origin, e.lineno),) + raise + return compiled.render({ + "name": name, + "platform": self, + "emit_rtlil": emit_rtlil, + "emit_verilog": emit_verilog, + "emit_debug_verilog": emit_debug_verilog, + "emit_commands": emit_commands, + "syntax": syntax, + "invoke_tool": invoke_tool, + "get_override": get_override, + "verbose": verbose, + "quiet": quiet, + "autogenerated": autogenerated, + }) + + plan = BuildPlan(script="build_{}".format(name)) + for filename_tpl, content_tpl in self.file_templates.items(): + plan.add_file(render(filename_tpl, origin=filename_tpl), + render(content_tpl, origin=content_tpl)) + for filename, content in self.extra_files.items(): + plan.add_file(filename, content) + return plan diff --git a/amaranth/build/res.py b/amaranth/build/res.py new file mode 100644 index 0000000..fde981f --- /dev/null +++ b/amaranth/build/res.py @@ -0,0 +1,256 @@ +from collections import OrderedDict + +from ..hdl.ast import * +from ..hdl.rec import * +from ..lib.io import * + +from .dsl import * + + +__all__ = ["ResourceError", "ResourceManager"] + + +class ResourceError(Exception): + pass + + +class ResourceManager: + def __init__(self, resources, connectors): + self.resources = OrderedDict() + self._requested = OrderedDict() + self._phys_reqd = OrderedDict() 
+ + self.connectors = OrderedDict() + self._conn_pins = OrderedDict() + + # Constraint lists + self._ports = [] + self._clocks = SignalDict() + + self.add_resources(resources) + self.add_connectors(connectors) + + def add_resources(self, resources): + for res in resources: + if not isinstance(res, Resource): + raise TypeError("Object {!r} is not a Resource".format(res)) + if (res.name, res.number) in self.resources: + raise NameError("Trying to add {!r}, but {!r} has the same name and number" + .format(res, self.resources[res.name, res.number])) + self.resources[res.name, res.number] = res + + def add_connectors(self, connectors): + for conn in connectors: + if not isinstance(conn, Connector): + raise TypeError("Object {!r} is not a Connector".format(conn)) + if (conn.name, conn.number) in self.connectors: + raise NameError("Trying to add {!r}, but {!r} has the same name and number" + .format(conn, self.connectors[conn.name, conn.number])) + self.connectors[conn.name, conn.number] = conn + + for conn_pin, plat_pin in conn: + assert conn_pin not in self._conn_pins + self._conn_pins[conn_pin] = plat_pin + + def lookup(self, name, number=0): + if (name, number) not in self.resources: + raise ResourceError("Resource {}#{} does not exist" + .format(name, number)) + return self.resources[name, number] + + def request(self, name, number=0, *, dir=None, xdr=None): + resource = self.lookup(name, number) + if (resource.name, resource.number) in self._requested: + raise ResourceError("Resource {}#{} has already been requested" + .format(name, number)) + + def merge_options(subsignal, dir, xdr): + if isinstance(subsignal.ios[0], Subsignal): + if dir is None: + dir = dict() + if xdr is None: + xdr = dict() + if not isinstance(dir, dict): + raise TypeError("Directions must be a dict, not {!r}, because {!r} " + "has subsignals" + .format(dir, subsignal)) + if not isinstance(xdr, dict): + raise TypeError("Data rate must be a dict, not {!r}, because {!r} " + "has subsignals" + 
.format(xdr, subsignal)) + for sub in subsignal.ios: + sub_dir = dir.get(sub.name, None) + sub_xdr = xdr.get(sub.name, None) + dir[sub.name], xdr[sub.name] = merge_options(sub, sub_dir, sub_xdr) + else: + if dir is None: + dir = subsignal.ios[0].dir + if xdr is None: + xdr = 0 + if dir not in ("i", "o", "oe", "io", "-"): + raise TypeError("Direction must be one of \"i\", \"o\", \"oe\", \"io\", " + "or \"-\", not {!r}" + .format(dir)) + if dir != subsignal.ios[0].dir and \ + not (subsignal.ios[0].dir == "io" or dir == "-"): + raise ValueError("Direction of {!r} cannot be changed from \"{}\" to \"{}\"; " + "direction can be changed from \"io\" to \"i\", \"o\", or " + "\"oe\", or from anything to \"-\"" + .format(subsignal.ios[0], subsignal.ios[0].dir, dir)) + if not isinstance(xdr, int) or xdr < 0: + raise ValueError("Data rate of {!r} must be a non-negative integer, not {!r}" + .format(subsignal.ios[0], xdr)) + return dir, xdr + + def resolve(resource, dir, xdr, name, attrs): + for attr_key, attr_value in attrs.items(): + if hasattr(attr_value, "__call__"): + attr_value = attr_value(self) + assert attr_value is None or isinstance(attr_value, str) + if attr_value is None: + del attrs[attr_key] + else: + attrs[attr_key] = attr_value + + if isinstance(resource.ios[0], Subsignal): + fields = OrderedDict() + for sub in resource.ios: + fields[sub.name] = resolve(sub, dir[sub.name], xdr[sub.name], + name="{}__{}".format(name, sub.name), + attrs={**attrs, **sub.attrs}) + return Record([ + (f_name, f.layout) for (f_name, f) in fields.items() + ], fields=fields, name=name) + + elif isinstance(resource.ios[0], (Pins, DiffPairs)): + phys = resource.ios[0] + if isinstance(phys, Pins): + phys_names = phys.names + port = Record([("io", len(phys))], name=name) + if isinstance(phys, DiffPairs): + phys_names = [] + record_fields = [] + if not self.should_skip_port_component(None, attrs, "p"): + phys_names += phys.p.names + record_fields.append(("p", len(phys))) + if not 
self.should_skip_port_component(None, attrs, "n"): + phys_names += phys.n.names + record_fields.append(("n", len(phys))) + port = Record(record_fields, name=name) + if dir == "-": + pin = None + else: + pin = Pin(len(phys), dir, xdr=xdr, name=name) + + for phys_name in phys_names: + if phys_name in self._phys_reqd: + raise ResourceError("Resource component {} uses physical pin {}, but it " + "is already used by resource component {} that was " + "requested earlier" + .format(name, phys_name, self._phys_reqd[phys_name])) + self._phys_reqd[phys_name] = name + + self._ports.append((resource, pin, port, attrs)) + + if pin is not None and resource.clock is not None: + self.add_clock_constraint(pin.i, resource.clock.frequency) + + return pin if pin is not None else port + + else: + assert False # :nocov: + + value = resolve(resource, + *merge_options(resource, dir, xdr), + name="{}_{}".format(resource.name, resource.number), + attrs=resource.attrs) + self._requested[resource.name, resource.number] = value + return value + + def iter_single_ended_pins(self): + for res, pin, port, attrs in self._ports: + if pin is None: + continue + if isinstance(res.ios[0], Pins): + yield pin, port, attrs, res.ios[0].invert + + def iter_differential_pins(self): + for res, pin, port, attrs in self._ports: + if pin is None: + continue + if isinstance(res.ios[0], DiffPairs): + yield pin, port, attrs, res.ios[0].invert + + def should_skip_port_component(self, port, attrs, component): + return False + + def iter_ports(self): + for res, pin, port, attrs in self._ports: + if isinstance(res.ios[0], Pins): + if not self.should_skip_port_component(port, attrs, "io"): + yield port.io + elif isinstance(res.ios[0], DiffPairs): + if not self.should_skip_port_component(port, attrs, "p"): + yield port.p + if not self.should_skip_port_component(port, attrs, "n"): + yield port.n + else: + assert False + + def iter_port_constraints(self): + for res, pin, port, attrs in self._ports: + if 
isinstance(res.ios[0], Pins): + if not self.should_skip_port_component(port, attrs, "io"): + yield port.io.name, res.ios[0].map_names(self._conn_pins, res), attrs + elif isinstance(res.ios[0], DiffPairs): + if not self.should_skip_port_component(port, attrs, "p"): + yield port.p.name, res.ios[0].p.map_names(self._conn_pins, res), attrs + if not self.should_skip_port_component(port, attrs, "n"): + yield port.n.name, res.ios[0].n.map_names(self._conn_pins, res), attrs + else: + assert False + + def iter_port_constraints_bits(self): + for port_name, pin_names, attrs in self.iter_port_constraints(): + if len(pin_names) == 1: + yield port_name, pin_names[0], attrs + else: + for bit, pin_name in enumerate(pin_names): + yield "{}[{}]".format(port_name, bit), pin_name, attrs + + def add_clock_constraint(self, clock, frequency): + if not isinstance(clock, Signal): + raise TypeError("Object {!r} is not a Signal".format(clock)) + if not isinstance(frequency, (int, float)): + raise TypeError("Frequency must be a number, not {!r}".format(frequency)) + + if clock in self._clocks: + raise ValueError("Cannot add clock constraint on {!r}, which is already constrained " + "to {} Hz" + .format(clock, self._clocks[clock])) + else: + self._clocks[clock] = float(frequency) + + def iter_clock_constraints(self): + # Back-propagate constraints through the input buffer. For clock constraints on pins + # (the majority of cases), toolchains work better if the constraint is defined on the pin + # and not on the buffered internal net; and if the toolchain is advanced enough that + # it considers clock phase and delay of the input buffer, it is *necessary* to define + # the constraint on the pin to match the designer's expectation of phase being referenced + # to the pin. + # + # Constraints on nets with no corresponding input pin (e.g. PLL or SERDES outputs) are not + # affected. 
+ pin_i_to_port = SignalDict() + for res, pin, port, attrs in self._ports: + if hasattr(pin, "i"): + if isinstance(res.ios[0], Pins): + pin_i_to_port[pin.i] = port.io + elif isinstance(res.ios[0], DiffPairs): + pin_i_to_port[pin.i] = port.p + else: + assert False + + for net_signal, frequency in self._clocks.items(): + port_signal = pin_i_to_port.get(net_signal) + yield net_signal, port_signal, frequency diff --git a/amaranth/build/run.py b/amaranth/build/run.py new file mode 100644 index 0000000..f87653d --- /dev/null +++ b/amaranth/build/run.py @@ -0,0 +1,268 @@ +from collections import OrderedDict +from contextlib import contextmanager +from abc import ABCMeta, abstractmethod +import os +import sys +import subprocess +import tempfile +import zipfile +import hashlib +import pathlib + + +__all__ = ["BuildPlan", "BuildProducts", "LocalBuildProducts", "RemoteSSHBuildProducts"] + + + +class BuildPlan: + def __init__(self, script): + """A build plan. + + Parameters + ---------- + script : str + The base name (without extension) of the script that will be executed. + """ + self.script = script + self.files = OrderedDict() + + def add_file(self, filename, content): + """ + Add ``content``, which can be a :class:`str`` or :class:`bytes`, to the build plan + as ``filename``. The file name can be a relative path with directories separated by + forward slashes (``/``). + """ + assert isinstance(filename, str) and filename not in self.files + self.files[filename] = content + + def digest(self, size=64): + """ + Compute a `digest`, a short byte sequence deterministically and uniquely identifying + this build plan. 
+ """ + hasher = hashlib.blake2b(digest_size=size) + for filename in sorted(self.files): + hasher.update(filename.encode("utf-8")) + content = self.files[filename] + if isinstance(content, str): + content = content.encode("utf-8") + hasher.update(content) + hasher.update(self.script.encode("utf-8")) + return hasher.digest() + + def archive(self, file): + """ + Archive files from the build plan into ``file``, which can be either a filename, or + a file-like object. The produced archive is deterministic: exact same files will + always produce exact same archive. + """ + with zipfile.ZipFile(file, "w") as archive: + # Write archive members in deterministic order and with deterministic timestamp. + for filename in sorted(self.files): + archive.writestr(zipfile.ZipInfo(filename), self.files[filename]) + + def execute_local(self, root="build", *, run_script=True): + """ + Execute build plan using the local strategy. Files from the build plan are placed in + the build root directory ``root``, and, if ``run_script`` is ``True``, the script + appropriate for the platform (``{script}.bat`` on Windows, ``{script}.sh`` elsewhere) is + executed in the build root. + + Returns :class:`LocalBuildProducts`. + """ + os.makedirs(root, exist_ok=True) + cwd = os.getcwd() + try: + os.chdir(root) + + for filename, content in self.files.items(): + filename = pathlib.Path(filename) + # Forbid parent directory components completely to avoid the possibility + # of writing outside the build root. + assert ".." not in filename.parts + dirname = os.path.dirname(filename) + if dirname: + os.makedirs(dirname, exist_ok=True) + + mode = "wt" if isinstance(content, str) else "wb" + with open(filename, mode) as f: + f.write(content) + + if run_script: + if sys.platform.startswith("win32"): + # Without "call", "cmd /c {}.bat" will return 0. + # See https://stackoverflow.com/a/30736987 for a detailed explanation of why. + # Running the script manually from a command prompt is unaffected. 
+ subprocess.check_call(["cmd", "/c", "call {}.bat".format(self.script)]) + else: + subprocess.check_call(["sh", "{}.sh".format(self.script)]) + + return LocalBuildProducts(os.getcwd()) + + finally: + os.chdir(cwd) + + def execute_remote_ssh(self, *, connect_to = {}, root, run_script=True): + """ + Execute build plan using the remote SSH strategy. Files from the build + plan are transferred via SFTP to the directory ``root`` on a remote + server. If ``run_script`` is ``True``, the ``paramiko`` SSH client will + then run ``{script}.sh``. ``root`` can either be an absolute or + relative (to the login directory) path. + + ``connect_to`` is a dictionary that holds all input arguments to + ``paramiko``'s ``SSHClient.connect`` + (`documentation `_). + At a minimum, the ``hostname`` input argument must be supplied in this + dictionary as the remote server. + + Returns :class:`RemoteSSHBuildProducts`. + """ + from paramiko import SSHClient + + with SSHClient() as client: + client.load_system_host_keys() + client.connect(**connect_to) + + with client.open_sftp() as sftp: + def mkdir_exist_ok(path): + try: + sftp.mkdir(str(path)) + except IOError as e: + # mkdir fails if directory exists. This is fine in amaranth.build. + # Reraise errors containing e.errno info. + if e.errno: + raise e + + def mkdirs(path): + # Iteratively create parent directories of a file by iterating over all + # parents except for the root ("."). Slicing the parents results in + # TypeError, so skip over the root ("."); this also handles files + # already in the root directory. + for parent in reversed(path.parents): + if parent == pathlib.PurePosixPath("."): + continue + else: + mkdir_exist_ok(parent) + + mkdir_exist_ok(root) + + sftp.chdir(root) + for filename, content in self.files.items(): + filename = pathlib.PurePosixPath(filename) + assert ".." 
not in filename.parts + + mkdirs(filename) + + mode = "wt" if isinstance(content, str) else "wb" + with sftp.file(str(filename), mode) as f: + # "b/t" modifier ignored in SFTP. + if mode == "wt": + f.write(content.encode("utf-8")) + else: + f.write(content) + + if run_script: + transport = client.get_transport() + channel = transport.open_session() + channel.set_combine_stderr(True) + + cmd = "if [ -f ~/.profile ]; then . ~/.profile; fi && cd {} && sh {}.sh".format(root, self.script) + channel.exec_command(cmd) + + # Show the output from the server while products are built. + buf = channel.recv(1024) + while buf: + print(buf.decode("utf-8"), end="") + buf = channel.recv(1024) + + return RemoteSSHBuildProducts(connect_to, root) + + def execute(self): + """ + Execute build plan using the default strategy. Use one of the ``execute_*`` methods + explicitly to have more control over the strategy. + """ + return self.execute_local() + + +class BuildProducts(metaclass=ABCMeta): + @abstractmethod + def get(self, filename, mode="b"): + """ + Extract ``filename`` from build products, and return it as a :class:`bytes` (if ``mode`` + is ``"b"``) or a :class:`str` (if ``mode`` is ``"t"``). + """ + assert mode in ("b", "t") + + @contextmanager + def extract(self, *filenames): + """ + Extract ``filenames`` from build products, place them in an OS-specific temporary file + location, with the extension preserved, and delete them afterwards. This method is used + as a context manager, e.g.: :: + + with products.extract("bitstream.bin", "programmer.cfg") \ + as bitstream_filename, config_filename: + subprocess.check_call(["program", "-c", config_filename, bitstream_filename]) + """ + files = [] + try: + for filename in filenames: + # On Windows, a named temporary file (as created by Python) is not accessible to + # others if it's still open within the Python process, so we close it and delete + # it manually. 
+ file = tempfile.NamedTemporaryFile( + prefix="amaranth_", suffix="_" + os.path.basename(filename), + delete=False) + files.append(file) + file.write(self.get(filename)) + file.close() + + if len(files) == 0: + return (yield) + elif len(files) == 1: + return (yield files[0].name) + else: + return (yield [file.name for file in files]) + finally: + for file in files: + os.unlink(file.name) + + +class LocalBuildProducts(BuildProducts): + def __init__(self, root): + # We provide no guarantees that files will be available on the local filesystem (i.e. in + # any way other than through `products.get()`) in general, so downstream code must never + # rely on this, even when we happen to use a local build most of the time. + self.__root = root + + def get(self, filename, mode="b"): + super().get(filename, mode) + with open(os.path.join(self.__root, filename), "r" + mode) as f: + return f.read() + + +class RemoteSSHBuildProducts(BuildProducts): + def __init__(self, connect_to, root): + self.__connect_to = connect_to + self.__root = root + + def get(self, filename, mode="b"): + super().get(filename, mode) + + from paramiko import SSHClient + + with SSHClient() as client: + client.load_system_host_keys() + client.connect(**self.__connect_to) + + with client.open_sftp() as sftp: + sftp.chdir(self.__root) + + with sftp.file(filename, "r" + mode) as f: + # "b/t" modifier ignored in SFTP. 
+ if mode == "t": + return f.read().decode("utf-8") + else: + return f.read() diff --git a/amaranth/cli.py b/amaranth/cli.py new file mode 100644 index 0000000..51e9a95 --- /dev/null +++ b/amaranth/cli.py @@ -0,0 +1,78 @@ +import argparse + +from .hdl.ir import Fragment +from .back import rtlil, cxxrtl, verilog +from .sim import Simulator + + +__all__ = ["main"] + + +def main_parser(parser=None): + if parser is None: + parser = argparse.ArgumentParser() + + p_action = parser.add_subparsers(dest="action") + + p_generate = p_action.add_parser("generate", + help="generate RTLIL, Verilog or CXXRTL from the design") + p_generate.add_argument("-t", "--type", dest="generate_type", + metavar="LANGUAGE", choices=["il", "cc", "v"], + help="generate LANGUAGE (il for RTLIL, v for Verilog, cc for CXXRTL; default: file extension of FILE, if given)") + p_generate.add_argument("generate_file", + metavar="FILE", type=argparse.FileType("w"), nargs="?", + help="write generated code to FILE") + + p_simulate = p_action.add_parser( + "simulate", help="simulate the design") + p_simulate.add_argument("-v", "--vcd-file", + metavar="VCD-FILE", type=argparse.FileType("w"), + help="write execution trace to VCD-FILE") + p_simulate.add_argument("-w", "--gtkw-file", + metavar="GTKW-FILE", type=argparse.FileType("w"), + help="write GTKWave configuration to GTKW-FILE") + p_simulate.add_argument("-p", "--period", dest="sync_period", + metavar="TIME", type=float, default=1e-6, + help="set 'sync' clock domain period to TIME (default: %(default)s)") + p_simulate.add_argument("-c", "--clocks", dest="sync_clocks", + metavar="COUNT", type=int, required=True, + help="simulate for COUNT 'sync' clock periods") + + return parser + + +def main_runner(parser, args, design, platform=None, name="top", ports=()): + if args.action == "generate": + fragment = Fragment.get(design, platform) + generate_type = args.generate_type + if generate_type is None and args.generate_file: + if 
args.generate_file.name.endswith(".il"): + generate_type = "il" + if args.generate_file.name.endswith(".cc"): + generate_type = "cc" + if args.generate_file.name.endswith(".v"): + generate_type = "v" + if generate_type is None: + parser.error("Unable to auto-detect language, specify explicitly with -t/--type") + if generate_type == "il": + output = rtlil.convert(fragment, name=name, ports=ports) + if generate_type == "cc": + output = cxxrtl.convert(fragment, name=name, ports=ports) + if generate_type == "v": + output = verilog.convert(fragment, name=name, ports=ports) + if args.generate_file: + args.generate_file.write(output) + else: + print(output) + + if args.action == "simulate": + fragment = Fragment.get(design, platform) + sim = Simulator(fragment) + sim.add_clock(args.sync_period) + with sim.write_vcd(vcd_file=args.vcd_file, gtkw_file=args.gtkw_file, traces=ports): + sim.run_until(args.sync_period * args.sync_clocks, run_passive=True) + + +def main(*args, **kwargs): + parser = main_parser() + main_runner(parser, parser.parse_args(), *args, **kwargs) diff --git a/amaranth/compat/__init__.py b/amaranth/compat/__init__.py new file mode 100644 index 0000000..bdf1313 --- /dev/null +++ b/amaranth/compat/__init__.py @@ -0,0 +1,11 @@ +from .fhdl.structure import * +from .fhdl.module import * +from .fhdl.specials import * +from .fhdl.bitcontainer import * +from .fhdl.decorators import * +# from .fhdl.simplify import * + +from .sim import * + +from .genlib.record import * +from .genlib.fsm import * diff --git a/amaranth/compat/fhdl/__init__.py b/amaranth/compat/fhdl/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/amaranth/compat/fhdl/bitcontainer.py b/amaranth/compat/fhdl/bitcontainer.py new file mode 100644 index 0000000..9f0edfd --- /dev/null +++ b/amaranth/compat/fhdl/bitcontainer.py @@ -0,0 +1,21 @@ +from ... 
import utils +from ...hdl import ast +from ..._utils import deprecated + + +__all__ = ["log2_int", "bits_for", "value_bits_sign"] + + +@deprecated("instead of `log2_int`, use `amaranth.utils.log2_int`") +def log2_int(n, need_pow2=True): + return utils.log2_int(n, need_pow2) + + +@deprecated("instead of `bits_for`, use `amaranth.utils.bits_for`") +def bits_for(n, require_sign_bit=False): + return utils.bits_for(n, require_sign_bit) + + +@deprecated("instead of `value_bits_sign(v)`, use `v.shape()`") +def value_bits_sign(v): + return tuple(ast.Value.cast(v).shape()) diff --git a/amaranth/compat/fhdl/conv_output.py b/amaranth/compat/fhdl/conv_output.py new file mode 100644 index 0000000..793fad2 --- /dev/null +++ b/amaranth/compat/fhdl/conv_output.py @@ -0,0 +1,35 @@ +from operator import itemgetter + + +class ConvOutput: + def __init__(self): + self.main_source = "" + self.data_files = dict() + + def set_main_source(self, src): + self.main_source = src + + def add_data_file(self, filename_base, content): + filename = filename_base + i = 1 + while filename in self.data_files: + parts = filename_base.split(".", maxsplit=1) + parts[0] += "_" + str(i) + filename = ".".join(parts) + i += 1 + self.data_files[filename] = content + return filename + + def __str__(self): + r = self.main_source + "\n" + for filename, content in sorted(self.data_files.items(), + key=itemgetter(0)): + r += filename + ":\n" + content + return r + + def write(self, main_filename): + with open(main_filename, "w") as f: + f.write(self.main_source) + for filename, content in self.data_files.items(): + with open(filename, "w") as f: + f.write(content) diff --git a/amaranth/compat/fhdl/decorators.py b/amaranth/compat/fhdl/decorators.py new file mode 100644 index 0000000..1b61eed --- /dev/null +++ b/amaranth/compat/fhdl/decorators.py @@ -0,0 +1,55 @@ +from ...hdl.ast import * +from ...hdl.xfrm import ResetInserter as NativeResetInserter +from ...hdl.xfrm import EnableInserter as NativeEnableInserter 
+from ...hdl.xfrm import DomainRenamer as NativeDomainRenamer +from ..._utils import deprecated + + +__all__ = ["ResetInserter", "CEInserter", "ClockDomainsRenamer"] + + +class _CompatControlInserter: + _control_name = None + _native_inserter = None + + def __init__(self, clock_domains=None): + self.clock_domains = clock_domains + + def __call__(self, module): + if self.clock_domains is None: + signals = {self._control_name: ("sync", Signal(name=self._control_name))} + else: + def name(cd): + return self._control_name + "_" + cd + signals = {name(cd): (cd, Signal(name=name(cd))) for cd in self.clock_domains} + for name, (cd, signal) in signals.items(): + setattr(module, name, signal) + return self._native_inserter(dict(signals.values()))(module) + + +@deprecated("instead of `migen.fhdl.decorators.ResetInserter`, " + "use `amaranth.hdl.xfrm.ResetInserter`; note that Amaranth ResetInserter accepts " + "a dict of reset signals (or a single reset signal) as an argument, not " + "a set of clock domain names (or a single clock domain name)") +class CompatResetInserter(_CompatControlInserter): + _control_name = "reset" + _native_inserter = NativeResetInserter + + +@deprecated("instead of `migen.fhdl.decorators.CEInserter`, " + "use `amaranth.hdl.xfrm.EnableInserter`; note that Amaranth EnableInserter accepts " + "a dict of enable signals (or a single enable signal) as an argument, not " + "a set of clock domain names (or a single clock domain name)") +class CompatCEInserter(_CompatControlInserter): + _control_name = "ce" + _native_inserter = NativeEnableInserter + + +class CompatClockDomainsRenamer(NativeDomainRenamer): + def __init__(self, cd_remapping): + super().__init__(cd_remapping) + + +ResetInserter = CompatResetInserter +CEInserter = CompatCEInserter +ClockDomainsRenamer = CompatClockDomainsRenamer diff --git a/amaranth/compat/fhdl/module.py b/amaranth/compat/fhdl/module.py new file mode 100644 index 0000000..f1b3cc5 --- /dev/null +++ 
b/amaranth/compat/fhdl/module.py @@ -0,0 +1,163 @@ +from collections.abc import Iterable + +from ..._utils import flatten, deprecated +from ...hdl import dsl, ir + + +__all__ = ["Module", "FinalizeError"] + + +def _flat_list(e): + if isinstance(e, Iterable): + return list(flatten(e)) + else: + return [e] + + +class CompatFinalizeError(Exception): + pass + + +FinalizeError = CompatFinalizeError + + +class _CompatModuleProxy: + def __init__(self, cm): + object.__setattr__(self, "_cm", cm) + + +class _CompatModuleComb(_CompatModuleProxy): + @deprecated("instead of `self.comb +=`, use `m.d.comb +=`") + def __iadd__(self, assigns): + self._cm._module._add_statement(assigns, domain=None, depth=0, compat_mode=True) + return self + + +class _CompatModuleSyncCD: + def __init__(self, cm, cd): + self._cm = cm + self._cd = cd + + @deprecated("instead of `self.sync. +=`, use `m.d. +=`") + def __iadd__(self, assigns): + self._cm._module._add_statement(assigns, domain=self._cd, depth=0, compat_mode=True) + return self + + +class _CompatModuleSync(_CompatModuleProxy): + @deprecated("instead of `self.sync +=`, use `m.d.sync +=`") + def __iadd__(self, assigns): + self._cm._module._add_statement(assigns, domain="sync", depth=0, compat_mode=True) + return self + + def __getattr__(self, name): + return _CompatModuleSyncCD(self._cm, name) + + def __setattr__(self, name, value): + if not isinstance(value, _CompatModuleSyncCD): + raise AttributeError("Attempted to assign sync property - use += instead") + + +class _CompatModuleSpecials(_CompatModuleProxy): + @deprecated("instead of `self.specials. =`, use `m.submodules. 
=`") + def __setattr__(self, name, value): + self._cm._submodules.append((name, value)) + setattr(self._cm, name, value) + + @deprecated("instead of `self.specials +=`, use `m.submodules +=`") + def __iadd__(self, other): + self._cm._submodules += [(None, e) for e in _flat_list(other)] + return self + + +class _CompatModuleSubmodules(_CompatModuleProxy): + @deprecated("instead of `self.submodules. =`, use `m.submodules. =`") + def __setattr__(self, name, value): + self._cm._submodules.append((name, value)) + setattr(self._cm, name, value) + + @deprecated("instead of `self.submodules +=`, use `m.submodules +=`") + def __iadd__(self, other): + self._cm._submodules += [(None, e) for e in _flat_list(other)] + return self + + +class _CompatModuleClockDomains(_CompatModuleProxy): + @deprecated("instead of `self.clock_domains. =`, use `m.domains. =`") + def __setattr__(self, name, value): + self.__iadd__(value) + setattr(self._cm, name, value) + + @deprecated("instead of `self.clock_domains +=`, use `m.domains +=`") + def __iadd__(self, other): + self._cm._module.domains += _flat_list(other) + return self + + +class CompatModule(ir.Elaboratable): + _MustUse__silence = True + + # Actually returns another Amaranth Elaboratable (amaranth.dsl.Module), not a Fragment. 
+ def get_fragment(self): + assert not self.get_fragment_called + self.get_fragment_called = True + self.finalize() + return self._module + + def elaborate(self, platform): + if not self.get_fragment_called: + self.get_fragment() + return self._module + + def __getattr__(self, name): + if name == "comb": + return _CompatModuleComb(self) + elif name == "sync": + return _CompatModuleSync(self) + elif name == "specials": + return _CompatModuleSpecials(self) + elif name == "submodules": + return _CompatModuleSubmodules(self) + elif name == "clock_domains": + return _CompatModuleClockDomains(self) + elif name == "finalized": + self.finalized = False + return self.finalized + elif name == "_module": + self._module = dsl.Module() + return self._module + elif name == "_submodules": + self._submodules = [] + return self._submodules + elif name == "_clock_domains": + self._clock_domains = [] + return self._clock_domains + elif name == "get_fragment_called": + self.get_fragment_called = False + return self.get_fragment_called + else: + raise AttributeError("'{}' object has no attribute '{}'" + .format(type(self).__name__, name)) + + def finalize(self, *args, **kwargs): + def finalize_submodules(): + for name, submodule in self._submodules: + if not hasattr(submodule, "finalize"): + continue + if submodule.finalized: + continue + submodule.finalize(*args, **kwargs) + + if not self.finalized: + self.finalized = True + finalize_submodules() + self.do_finalize(*args, **kwargs) + finalize_submodules() + for name, submodule in self._submodules: + self._module._add_submodule(submodule, name) + + def do_finalize(self): + pass + + +Module = CompatModule diff --git a/amaranth/compat/fhdl/specials.py b/amaranth/compat/fhdl/specials.py new file mode 100644 index 0000000..901fc67 --- /dev/null +++ b/amaranth/compat/fhdl/specials.py @@ -0,0 +1,145 @@ +import warnings + +from ..._utils import deprecated, extend +from ...hdl.ast import * +from ...hdl.ir import Elaboratable +from ...hdl.mem 
import Memory as NativeMemory +from ...hdl.ir import Fragment, Instance +from ...hdl.dsl import Module +from .module import Module as CompatModule +from .structure import Signal +from ...lib.io import Pin + + +__all__ = ["TSTriple", "Instance", "Memory", "READ_FIRST", "WRITE_FIRST", "NO_CHANGE"] + + +class TSTriple: + def __init__(self, bits_sign=None, min=None, max=None, reset_o=0, reset_oe=0, reset_i=0, + name=None): + self.o = Signal(bits_sign, min=min, max=max, reset=reset_o, + name=None if name is None else name + "_o") + self.oe = Signal(reset=reset_oe, + name=None if name is None else name + "_oe") + self.i = Signal(bits_sign, min=min, max=max, reset=reset_i, + name=None if name is None else name + "_i") + + def __len__(self): + return len(self.o) + + def get_tristate(self, io): + return Tristate(io, self.o, self.oe, self.i) + + +class Tristate(Elaboratable): + def __init__(self, target, o, oe, i=None): + self.target = target + self.o = o + self.oe = oe + self.i = i if i is not None else None + + def elaborate(self, platform): + if self.i is None: + pin = Pin(len(self.target), dir="oe") + pin.o = self.o + pin.oe = self.oe + return platform.get_tristate(pin, self.target, attrs={}, invert=None) + else: + pin = Pin(len(self.target), dir="io") + pin.o = self.o + pin.oe = self.oe + pin.i = self.i + return platform.get_input_output(pin, self.target, attrs={}, invert=None) + + m = Module() + if self.i is not None: + m.d.comb += self.i.eq(self.target) + m.submodules += Instance("$tribuf", + p_WIDTH=len(self.target), + i_EN=self.oe, + i_A=self.o, + o_Y=self.target, + ) + + f = m.elaborate(platform) + f.flatten = True + return f + + +(READ_FIRST, WRITE_FIRST, NO_CHANGE) = range(3) + + +class _MemoryPort(CompatModule): + def __init__(self, adr, dat_r, we=None, dat_w=None, async_read=False, re=None, + we_granularity=0, mode=WRITE_FIRST, clock_domain="sync"): + self.adr = adr + self.dat_r = dat_r + self.we = we + self.dat_w = dat_w + self.async_read = async_read + 
self.re = re + self.we_granularity = we_granularity + self.mode = mode + self.clock = ClockSignal(clock_domain) + + +@extend(NativeMemory) +@deprecated("it is not necessary or permitted to add Memory as a special or submodule") +def elaborate(self, platform): + return Fragment() + + +class CompatMemory(NativeMemory, Elaboratable): + def __init__(self, width, depth, init=None, name=None): + super().__init__(width=width, depth=depth, init=init, name=name) + + @deprecated("instead of `get_port()`, use `read_port()` and `write_port()`") + def get_port(self, write_capable=False, async_read=False, has_re=False, we_granularity=0, + mode=WRITE_FIRST, clock_domain="sync"): + if we_granularity >= self.width: + warnings.warn("do not specify `we_granularity` greater than memory width, as it " + "is a hard error in non-compatibility mode", + DeprecationWarning, stacklevel=1) + we_granularity = 0 + if we_granularity == 0: + warnings.warn("instead of `we_granularity=0`, use `we_granularity=None` or avoid " + "specifying it at all, as it is a hard error in non-compatibility mode", + DeprecationWarning, stacklevel=1) + we_granularity = None + assert mode != NO_CHANGE + rdport = self.read_port(domain="comb" if async_read else clock_domain, + transparent=mode == WRITE_FIRST) + rdport.addr.name = "{}_addr".format(self.name) + adr = rdport.addr + dat_r = rdport.data + if write_capable: + wrport = self.write_port(domain=clock_domain, granularity=we_granularity) + wrport.addr = rdport.addr + we = wrport.en + dat_w = wrport.data + else: + we = None + dat_w = None + if has_re: + if mode == READ_FIRST: + re = rdport.en + else: + warnings.warn("the combination of `has_re=True` and `mode=WRITE_FIRST` has " + "surprising behavior: keeping `re` low would merely latch " + "the address, while the data will change with changing memory " + "contents; avoid using `re` with transparent ports as it is a hard " + "error in non-compatibility mode", + DeprecationWarning, stacklevel=1) + re = Signal() + 
else: + re = None + mp = _MemoryPort(adr, dat_r, we, dat_w, + async_read, re, we_granularity, mode, + clock_domain) + mp.submodules.rdport = rdport + if write_capable: + mp.submodules.wrport = wrport + return mp + + +Memory = CompatMemory diff --git a/amaranth/compat/fhdl/structure.py b/amaranth/compat/fhdl/structure.py new file mode 100644 index 0000000..d450e45 --- /dev/null +++ b/amaranth/compat/fhdl/structure.py @@ -0,0 +1,185 @@ +import builtins +import warnings +from collections import OrderedDict + +from ...utils import bits_for +from ..._utils import deprecated, extend +from ...hdl import ast +from ...hdl.ast import (DUID, + Shape, signed, unsigned, + Value, Const, C, Mux, Slice as _Slice, Part, Cat, Repl, + Signal as NativeSignal, + ClockSignal, ResetSignal, + Array, ArrayProxy as _ArrayProxy) +from ...hdl.cd import ClockDomain + + +__all__ = ["DUID", "wrap", "Mux", "Cat", "Replicate", "Constant", "C", "Signal", "ClockSignal", + "ResetSignal", "If", "Case", "Array", "ClockDomain"] + + +@deprecated("instead of `wrap`, use `Value.cast`") +def wrap(v): + return Value.cast(v) + + +class CompatSignal(NativeSignal): + def __init__(self, bits_sign=None, name=None, variable=False, reset=0, + reset_less=False, name_override=None, min=None, max=None, + related=None, attr=None, src_loc_at=0, **kwargs): + if min is not None or max is not None: + warnings.warn("instead of `Signal(min={min}, max={max})`, " + "use `Signal(range({min}, {max}))`" + .format(min=min or 0, max=max or 2), + DeprecationWarning, stacklevel=2 + src_loc_at) + + if bits_sign is None: + if min is None: + min = 0 + if max is None: + max = 2 + max -= 1 # make both bounds inclusive + if min > max: + raise ValueError("Lower bound {} should be less or equal to higher bound {}" + .format(min, max + 1)) + sign = min < 0 or max < 0 + if min == max: + bits = 0 + else: + bits = builtins.max(bits_for(min, sign), bits_for(max, sign)) + shape = signed(bits) if sign else unsigned(bits) + else: + if not (min is 
None and max is None): + raise ValueError("Only one of bits/signedness or bounds may be specified") + shape = bits_sign + + super().__init__(shape=shape, name=name_override or name, + reset=reset, reset_less=reset_less, + attrs=attr, src_loc_at=1 + src_loc_at, **kwargs) + + +Signal = CompatSignal + + +@deprecated("instead of `Constant`, use `Const`") +def Constant(value, bits_sign=None): + return Const(value, bits_sign) + + +@deprecated("instead of `Replicate`, use `Repl`") +def Replicate(v, n): + return Repl(v, n) + + +@extend(Const) +@property +@deprecated("instead of `.nbits`, use `.width`") +def nbits(self): + return self.width + + +@extend(NativeSignal) +@property +@deprecated("instead of `.nbits`, use `.width`") +def nbits(self): + return self.width + + +@extend(NativeSignal) +@NativeSignal.nbits.setter +@deprecated("instead of `.nbits = x`, use `.width = x`") +def nbits(self, value): + self.width = value + + +@extend(NativeSignal) +@deprecated("instead of `.part`, use `.bit_select`") +def part(self, offset, width): + return Part(self, offset, width, src_loc_at=2) + + +@extend(Cat) +@property +@deprecated("instead of `.l`, use `.parts`") +def l(self): + return self.parts + + +@extend(ast.Operator) +@property +@deprecated("instead of `.op`, use `.operator`") +def op(self): + return self.operator + + +@extend(_ArrayProxy) +@property +@deprecated("instead `_ArrayProxy.choices`, use `ArrayProxy.elems`") +def choices(self): + return self.elems + + +class If(ast.Switch): + @deprecated("instead of `If(cond, ...)`, use `with m.If(cond): ...`") + def __init__(self, cond, *stmts): + cond = Value.cast(cond) + if len(cond) != 1: + cond = cond.bool() + super().__init__(cond, {("1",): ast.Statement.cast(stmts)}) + + @deprecated("instead of `.Elif(cond, ...)`, use `with m.Elif(cond): ...`") + def Elif(self, cond, *stmts): + cond = Value.cast(cond) + if len(cond) != 1: + cond = cond.bool() + self.cases = OrderedDict((("-" + k,), v) for (k,), v in self.cases.items()) + 
self.cases[("1" + "-" * len(self.test),)] = ast.Statement.cast(stmts) + self.test = Cat(self.test, cond) + return self + + @deprecated("instead of `.Else(...)`, use `with m.Else(): ...`") + def Else(self, *stmts): + self.cases[()] = ast.Statement.cast(stmts) + return self + + +class Case(ast.Switch): + @deprecated("instead of `Case(test, { value: stmts })`, use `with m.Switch(test):` and " + "`with m.Case(value): stmts`; instead of `\"default\": stmts`, use " + "`with m.Case(): stmts`") + def __init__(self, test, cases): + new_cases = [] + default = None + for k, v in cases.items(): + if isinstance(k, (bool, int)): + k = Const(k) + if (not isinstance(k, Const) + and not (isinstance(k, str) and k == "default")): + raise TypeError("Case object is not a Migen constant") + if isinstance(k, str) and k == "default": + default = v + continue + else: + k = k.value + new_cases.append((k, v)) + if default is not None: + new_cases.append((None, default)) + super().__init__(test, OrderedDict(new_cases)) + + @deprecated("instead of `Case(...).makedefault()`, use an explicit default case: " + "`with m.Case(): ...`") + def makedefault(self, key=None): + if key is None: + for choice in self.cases.keys(): + if (key is None + or (isinstance(choice, str) and choice == "default") + or choice > key): + key = choice + elif isinstance(key, str) and key == "default": + key = () + else: + key = ("{:0{}b}".format(ast.Value.cast(key).value, len(self.test)),) + stmts = self.cases[key] + del self.cases[key] + self.cases[()] = stmts + return self diff --git a/amaranth/compat/fhdl/verilog.py b/amaranth/compat/fhdl/verilog.py new file mode 100644 index 0000000..3773cc6 --- /dev/null +++ b/amaranth/compat/fhdl/verilog.py @@ -0,0 +1,35 @@ +import warnings + +from ...hdl.ir import Fragment +from ...hdl.cd import ClockDomain +from ...back import verilog +from .conv_output import ConvOutput +from .module import Module + + +def convert(fi, ios=None, name="top", special_overrides=dict(), + 
attr_translate=None, create_clock_domains=True, + display_run=False): + if display_run: + warnings.warn("`display_run=True` support has been removed", + DeprecationWarning, stacklevel=1) + if special_overrides: + warnings.warn("`special_overrides` support as well as `Special` has been removed", + DeprecationWarning, stacklevel=1) + # TODO: attr_translate + + if isinstance(fi, Module): + fi = fi.get_fragment() + + def missing_domain(name): + if create_clock_domains: + return ClockDomain(name) + v_output = verilog.convert( + elaboratable=fi, + name=name, + ports=ios or (), + missing_domain=missing_domain + ) + output = ConvOutput() + output.set_main_source(v_output) + return output diff --git a/amaranth/compat/genlib/__init__.py b/amaranth/compat/genlib/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/amaranth/compat/genlib/cdc.py b/amaranth/compat/genlib/cdc.py new file mode 100644 index 0000000..95ae22a --- /dev/null +++ b/amaranth/compat/genlib/cdc.py @@ -0,0 +1,74 @@ +import warnings + +from ..._utils import deprecated +from ...lib.cdc import FFSynchronizer as NativeFFSynchronizer +from ...lib.cdc import PulseSynchronizer as NativePulseSynchronizer +from ...hdl.ast import * +from ..fhdl.module import CompatModule +from ..fhdl.structure import If + + +__all__ = ["MultiReg", "PulseSynchronizer", "GrayCounter", "GrayDecoder"] + + +class MultiReg(NativeFFSynchronizer): + def __init__(self, i, o, odomain="sync", n=2, reset=0): + old_opts = [] + new_opts = [] + if odomain != "sync": + old_opts.append(", odomain={!r}".format(odomain)) + new_opts.append(", o_domain={!r}".format(odomain)) + if n != 2: + old_opts.append(", n={!r}".format(n)) + new_opts.append(", stages={!r}".format(n)) + warnings.warn("instead of `MultiReg(...{})`, use `FFSynchronizer(...{})`" + .format("".join(old_opts), "".join(new_opts)), + DeprecationWarning, stacklevel=2) + super().__init__(i, o, o_domain=odomain, stages=n, reset=reset) + self.odomain = odomain + + 
+@deprecated("instead of `migen.genlib.cdc.PulseSynchronizer`, use `amaranth.lib.cdc.PulseSynchronizer`") +class PulseSynchronizer(NativePulseSynchronizer): + def __init__(self, idomain, odomain): + super().__init__(i_domain=idomain, o_domain=odomain) + + +@deprecated("instead of `migen.genlib.cdc.GrayCounter`, use `amaranth.lib.coding.GrayEncoder`") +class GrayCounter(CompatModule): + def __init__(self, width): + self.ce = Signal() + self.q = Signal(width) + self.q_next = Signal(width) + self.q_binary = Signal(width) + self.q_next_binary = Signal(width) + + ### + + self.comb += [ + If(self.ce, + self.q_next_binary.eq(self.q_binary + 1) + ).Else( + self.q_next_binary.eq(self.q_binary) + ), + self.q_next.eq(self.q_next_binary ^ self.q_next_binary[1:]) + ] + self.sync += [ + self.q_binary.eq(self.q_next_binary), + self.q.eq(self.q_next) + ] + + +@deprecated("instead of `migen.genlib.cdc.GrayDecoder`, use `amaranth.lib.coding.GrayDecoder`") +class GrayDecoder(CompatModule): + def __init__(self, width): + self.i = Signal(width) + self.o = Signal(width, reset_less=True) + + # # # + + o_comb = Signal(width) + self.comb += o_comb[-1].eq(self.i[-1]) + for i in reversed(range(width-1)): + self.comb += o_comb[i].eq(o_comb[i+1] ^ self.i[i]) + self.sync += self.o.eq(o_comb) diff --git a/amaranth/compat/genlib/coding.py b/amaranth/compat/genlib/coding.py new file mode 100644 index 0000000..44154a2 --- /dev/null +++ b/amaranth/compat/genlib/coding.py @@ -0,0 +1,4 @@ +from ...lib.coding import * + + +__all__ = ["Encoder", "PriorityEncoder", "Decoder", "PriorityDecoder"] diff --git a/amaranth/compat/genlib/fifo.py b/amaranth/compat/genlib/fifo.py new file mode 100644 index 0000000..a944428 --- /dev/null +++ b/amaranth/compat/genlib/fifo.py @@ -0,0 +1,147 @@ +from ..._utils import deprecated, extend +from ...lib.fifo import (FIFOInterface as NativeFIFOInterface, + SyncFIFO as NativeSyncFIFO, SyncFIFOBuffered as NativeSyncFIFOBuffered, + AsyncFIFO as NativeAsyncFIFO, 
AsyncFIFOBuffered as NativeAsyncFIFOBuffered) + + +__all__ = ["_FIFOInterface", "SyncFIFO", "SyncFIFOBuffered", "AsyncFIFO", "AsyncFIFOBuffered"] + + +class CompatFIFOInterface(NativeFIFOInterface): + @deprecated("attribute `fwft` must be provided to FIFOInterface constructor") + def __init__(self, width, depth): + super().__init__(width=width, depth=depth, fwft=False) + del self.fwft + + +@extend(NativeFIFOInterface) +@property +@deprecated("instead of `fifo.din`, use `fifo.w_data`") +def din(self): + return self.w_data + + +@extend(NativeFIFOInterface) +@NativeFIFOInterface.din.setter +@deprecated("instead of `fifo.din = x`, use `fifo.w_data = x`") +def din(self, w_data): + self.w_data = w_data + + +@extend(NativeFIFOInterface) +@property +@deprecated("instead of `fifo.writable`, use `fifo.w_rdy`") +def writable(self): + return self.w_rdy + + +@extend(NativeFIFOInterface) +@NativeFIFOInterface.writable.setter +@deprecated("instead of `fifo.writable = x`, use `fifo.w_rdy = x`") +def writable(self, w_rdy): + self.w_rdy = w_rdy + + +@extend(NativeFIFOInterface) +@property +@deprecated("instead of `fifo.we`, use `fifo.w_en`") +def we(self): + return self.w_en + + +@extend(NativeFIFOInterface) +@NativeFIFOInterface.we.setter +@deprecated("instead of `fifo.we = x`, use `fifo.w_en = x`") +def we(self, w_en): + self.w_en = w_en + + +@extend(NativeFIFOInterface) +@property +@deprecated("instead of `fifo.dout`, use `fifo.r_data`") +def dout(self): + return self.r_data + + +@extend(NativeFIFOInterface) +@NativeFIFOInterface.dout.setter +@deprecated("instead of `fifo.dout = x`, use `fifo.r_data = x`") +def dout(self, r_data): + self.r_data = r_data + + +@extend(NativeFIFOInterface) +@property +@deprecated("instead of `fifo.readable`, use `fifo.r_rdy`") +def readable(self): + return self.r_rdy + + +@extend(NativeFIFOInterface) +@NativeFIFOInterface.readable.setter +@deprecated("instead of `fifo.readable = x`, use `fifo.r_rdy = x`") +def readable(self, r_rdy): + self.r_rdy = 
r_rdy + + +@extend(NativeFIFOInterface) +@property +@deprecated("instead of `fifo.re`, use `fifo.r_en`") +def re(self): + return self.r_en + + +@extend(NativeFIFOInterface) +@NativeFIFOInterface.re.setter +@deprecated("instead of `fifo.re = x`, use `fifo.r_en = x`") +def re(self, r_en): + self.r_en = r_en + + +@extend(NativeFIFOInterface) +def read(self): + """Read method for simulation.""" + assert (yield self.r_rdy) + value = (yield self.r_data) + yield self.r_en.eq(1) + yield + yield self.r_en.eq(0) + yield + return value + +@extend(NativeFIFOInterface) +def write(self, data): + """Write method for simulation.""" + assert (yield self.w_rdy) + yield self.w_data.eq(data) + yield self.w_en.eq(1) + yield + yield self.w_en.eq(0) + yield + + +class CompatSyncFIFO(NativeSyncFIFO): + def __init__(self, width, depth, fwft=True): + super().__init__(width=width, depth=depth, fwft=fwft) + + +class CompatSyncFIFOBuffered(NativeSyncFIFOBuffered): + def __init__(self, width, depth): + super().__init__(width=width, depth=depth) + + +class CompatAsyncFIFO(NativeAsyncFIFO): + def __init__(self, width, depth): + super().__init__(width=width, depth=depth) + + +class CompatAsyncFIFOBuffered(NativeAsyncFIFOBuffered): + def __init__(self, width, depth): + super().__init__(width=width, depth=depth) + + +_FIFOInterface = CompatFIFOInterface +SyncFIFO = CompatSyncFIFO +SyncFIFOBuffered = CompatSyncFIFOBuffered +AsyncFIFO = CompatAsyncFIFO +AsyncFIFOBuffered = CompatAsyncFIFOBuffered diff --git a/amaranth/compat/genlib/fsm.py b/amaranth/compat/genlib/fsm.py new file mode 100644 index 0000000..6904550 --- /dev/null +++ b/amaranth/compat/genlib/fsm.py @@ -0,0 +1,193 @@ +from collections import OrderedDict + +from ..._utils import deprecated, _ignore_deprecated +from ...hdl.xfrm import ValueTransformer, StatementTransformer +from ...hdl.ast import * +from ...hdl.ast import Signal as NativeSignal +from ..fhdl.module import CompatModule, CompatFinalizeError +from ..fhdl.structure import 
Signal, If, Case + + +__all__ = ["AnonymousState", "NextState", "NextValue", "FSM"] + + +class AnonymousState: + pass + + +class NextState(Statement): + def __init__(self, state): + super().__init__() + self.state = state + + +class NextValue(Statement): + def __init__(self, target, value): + super().__init__() + self.target = target + self.value = value + + +def _target_eq(a, b): + if type(a) != type(b): + return False + ty = type(a) + if ty == Const: + return a.value == b.value + elif ty == NativeSignal or ty == Signal: + return a is b + elif ty == Cat: + return all(_target_eq(x, y) for x, y in zip(a.l, b.l)) + elif ty == Slice: + return (_target_eq(a.value, b.value) + and a.start == b.start + and a.stop == b.stop) + elif ty == Part: + return (_target_eq(a.value, b.value) + and _target_eq(a.offset == b.offset) + and a.width == b.width) + elif ty == ArrayProxy: + return (all(_target_eq(x, y) for x, y in zip(a.choices, b.choices)) + and _target_eq(a.key, b.key)) + else: + raise ValueError("NextValue cannot be used with target type '{}'" + .format(ty)) + + +class _LowerNext(ValueTransformer, StatementTransformer): + def __init__(self, next_state_signal, encoding, aliases): + self.next_state_signal = next_state_signal + self.encoding = encoding + self.aliases = aliases + # (target, next_value_ce, next_value) + self.registers = [] + + def _get_register_control(self, target): + for x in self.registers: + if _target_eq(target, x[0]): + return x[1], x[2] + raise KeyError + + def on_unknown_statement(self, node): + if isinstance(node, NextState): + try: + actual_state = self.aliases[node.state] + except KeyError: + actual_state = node.state + return self.next_state_signal.eq(self.encoding[actual_state]) + elif isinstance(node, NextValue): + try: + next_value_ce, next_value = self._get_register_control(node.target) + except KeyError: + related = node.target if isinstance(node.target, Signal) else None + next_value = Signal(node.target.shape(), + name=None if related is 
None else "{}_fsm_next".format(related.name)) + next_value_ce = Signal( + name=None if related is None else "{}_fsm_next_ce".format(related.name)) + self.registers.append((node.target, next_value_ce, next_value)) + return next_value.eq(node.value), next_value_ce.eq(1) + else: + return node + + +@deprecated("instead of `migen.genlib.fsm.FSM()`, use `with m.FSM():`; note that there is no " + "replacement for `{before,after}_{entering,leaving}` and `delayed_enter` methods") +class FSM(CompatModule): + def __init__(self, reset_state=None): + self.actions = OrderedDict() + self.state_aliases = dict() + self.reset_state = reset_state + + self.before_entering_signals = OrderedDict() + self.before_leaving_signals = OrderedDict() + self.after_entering_signals = OrderedDict() + self.after_leaving_signals = OrderedDict() + + def act(self, state, *statements): + if self.finalized: + raise CompatFinalizeError + if self.reset_state is None: + self.reset_state = state + if state not in self.actions: + self.actions[state] = [] + self.actions[state] += statements + + def delayed_enter(self, name, target, delay): + if self.finalized: + raise CompatFinalizeError + if delay > 0: + state = name + for i in range(delay): + if i == delay - 1: + next_state = target + else: + next_state = AnonymousState() + self.act(state, NextState(next_state)) + state = next_state + else: + self.state_aliases[name] = target + + def ongoing(self, state): + is_ongoing = Signal() + self.act(state, is_ongoing.eq(1)) + return is_ongoing + + def _get_signal(self, d, state): + if state not in self.actions: + self.actions[state] = [] + try: + return d[state] + except KeyError: + is_el = Signal() + d[state] = is_el + return is_el + + def before_entering(self, state): + return self._get_signal(self.before_entering_signals, state) + + def before_leaving(self, state): + return self._get_signal(self.before_leaving_signals, state) + + def after_entering(self, state): + signal = 
self._get_signal(self.after_entering_signals, state) + self.sync += signal.eq(self.before_entering(state)) + return signal + + def after_leaving(self, state): + signal = self._get_signal(self.after_leaving_signals, state) + self.sync += signal.eq(self.before_leaving(state)) + return signal + + @_ignore_deprecated + def do_finalize(self): + nstates = len(self.actions) + self.encoding = dict((s, n) for n, s in enumerate(self.actions.keys())) + self.decoding = {n: s for s, n in self.encoding.items()} + + decoder = lambda n: "{}/{}".format(self.decoding[n], n) + self.state = Signal(range(nstates), reset=self.encoding[self.reset_state], decoder=decoder) + self.next_state = Signal.like(self.state) + + for state, signal in self.before_leaving_signals.items(): + encoded = self.encoding[state] + self.comb += signal.eq((self.state == encoded) & ~(self.next_state == encoded)) + if self.reset_state in self.after_entering_signals: + self.after_entering_signals[self.reset_state].reset = 1 + for state, signal in self.before_entering_signals.items(): + encoded = self.encoding[state] + self.comb += signal.eq(~(self.state == encoded) & (self.next_state == encoded)) + + self._finalize_sync(self._lower_controls()) + + def _lower_controls(self): + return _LowerNext(self.next_state, self.encoding, self.state_aliases) + + def _finalize_sync(self, ls): + cases = dict((self.encoding[k], ls.on_statement(v)) for k, v in self.actions.items() if v) + self.comb += [ + self.next_state.eq(self.state), + Case(self.state, cases).makedefault(self.encoding[self.reset_state]) + ] + self.sync += self.state.eq(self.next_state) + for register, next_value_ce, next_value in ls.registers: + self.sync += If(next_value_ce, register.eq(next_value)) diff --git a/amaranth/compat/genlib/record.py b/amaranth/compat/genlib/record.py new file mode 100644 index 0000000..69ce0e2 --- /dev/null +++ b/amaranth/compat/genlib/record.py @@ -0,0 +1,195 @@ +from ...tracer import * +from ..fhdl.structure import * + +from 
functools import reduce +from operator import or_ + + +(DIR_NONE, DIR_S_TO_M, DIR_M_TO_S) = range(3) + +# Possible layout elements: +# 1. (name, size) +# 2. (name, size, direction) +# 3. (name, sublayout) +# size can be an int, or a (int, bool) tuple for signed numbers +# sublayout must be a list + + +def set_layout_parameters(layout, **layout_dict): + def resolve(p): + if isinstance(p, str): + try: + return layout_dict[p] + except KeyError: + return p + else: + return p + + r = [] + for f in layout: + if isinstance(f[1], (int, tuple, str)): # cases 1/2 + if len(f) == 3: + r.append((f[0], resolve(f[1]), f[2])) + else: + r.append((f[0], resolve(f[1]))) + elif isinstance(f[1], list): # case 3 + r.append((f[0], set_layout_parameters(f[1], **layout_dict))) + else: + raise TypeError + return r + + +def layout_len(layout): + r = 0 + for f in layout: + if isinstance(f[1], (int, tuple)): # cases 1/2 + if len(f) == 3: + fname, fsize, fdirection = f + else: + fname, fsize = f + elif isinstance(f[1], list): # case 3 + fname, fsublayout = f + fsize = layout_len(fsublayout) + else: + raise TypeError + if isinstance(fsize, tuple): + r += fsize[0] + else: + r += fsize + return r + + +def layout_get(layout, name): + for f in layout: + if f[0] == name: + return f + raise KeyError(name) + + +def layout_partial(layout, *elements): + r = [] + for path in elements: + path_s = path.split("/") + last = path_s.pop() + copy_ref = layout + insert_ref = r + for hop in path_s: + name, copy_ref = layout_get(copy_ref, hop) + try: + name, insert_ref = layout_get(insert_ref, hop) + except KeyError: + new_insert_ref = [] + insert_ref.append((hop, new_insert_ref)) + insert_ref = new_insert_ref + insert_ref.append(layout_get(copy_ref, last)) + return r + + +class Record: + def __init__(self, layout, name=None, **kwargs): + try: + self.name = get_var_name() + except NameNotFound: + self.name = "" + self.layout = layout + + if self.name: + prefix = self.name + "_" + else: + prefix = "" + for f in 
self.layout: + if isinstance(f[1], (int, tuple)): # cases 1/2 + if(len(f) == 3): + fname, fsize, fdirection = f + else: + fname, fsize = f + finst = Signal(fsize, name=prefix + fname, **kwargs) + elif isinstance(f[1], list): # case 3 + fname, fsublayout = f + finst = Record(fsublayout, prefix + fname, **kwargs) + else: + raise TypeError + setattr(self, fname, finst) + + def eq(self, other): + return [getattr(self, f[0]).eq(getattr(other, f[0])) + for f in self.layout if hasattr(other, f[0])] + + def iter_flat(self): + for f in self.layout: + e = getattr(self, f[0]) + if isinstance(e, Signal): + if len(f) == 3: + yield e, f[2] + else: + yield e, DIR_NONE + elif isinstance(e, Record): + yield from e.iter_flat() + else: + raise TypeError + + def flatten(self): + return [signal for signal, direction in self.iter_flat()] + + def raw_bits(self): + return Cat(*self.flatten()) + + def connect(self, *slaves, keep=None, omit=None): + if keep is None: + _keep = set([f[0] for f in self.layout]) + elif isinstance(keep, list): + _keep = set(keep) + else: + _keep = keep + if omit is None: + _omit = set() + elif isinstance(omit, list): + _omit = set(omit) + else: + _omit = omit + + _keep = _keep - _omit + + r = [] + for f in self.layout: + field = f[0] + self_e = getattr(self, field) + if isinstance(self_e, Signal): + if field in _keep: + direction = f[2] + if direction == DIR_M_TO_S: + r += [getattr(slave, field).eq(self_e) for slave in slaves] + elif direction == DIR_S_TO_M: + r.append(self_e.eq(reduce(or_, [getattr(slave, field) for slave in slaves]))) + else: + raise TypeError + else: + for slave in slaves: + r += self_e.connect(getattr(slave, field), keep=keep, omit=omit) + return r + + def connect_flat(self, *slaves): + r = [] + iter_slaves = [slave.iter_flat() for slave in slaves] + for m_signal, m_direction in self.iter_flat(): + if m_direction == DIR_M_TO_S: + for iter_slave in iter_slaves: + s_signal, s_direction = next(iter_slave) + assert(s_direction == DIR_M_TO_S) + 
r.append(s_signal.eq(m_signal)) + elif m_direction == DIR_S_TO_M: + s_signals = [] + for iter_slave in iter_slaves: + s_signal, s_direction = next(iter_slave) + assert(s_direction == DIR_S_TO_M) + s_signals.append(s_signal) + r.append(m_signal.eq(reduce(or_, s_signals))) + else: + raise TypeError + return r + + def __len__(self): + return layout_len(self.layout) + + def __repr__(self): + return "" diff --git a/amaranth/compat/genlib/resetsync.py b/amaranth/compat/genlib/resetsync.py new file mode 100644 index 0000000..6f1a911 --- /dev/null +++ b/amaranth/compat/genlib/resetsync.py @@ -0,0 +1,16 @@ +from ..._utils import deprecated +from ...lib.cdc import ResetSynchronizer as NativeResetSynchronizer + + +__all__ = ["AsyncResetSynchronizer"] + + +@deprecated("instead of `migen.genlib.resetsync.AsyncResetSynchronizer`, " + "use `amaranth.lib.cdc.ResetSynchronizer`; note that ResetSynchronizer accepts " + "a clock domain name as an argument, not a clock domain object") +class CompatResetSynchronizer(NativeResetSynchronizer): + def __init__(self, cd, async_reset): + super().__init__(async_reset, domain=cd.name) + + +AsyncResetSynchronizer = CompatResetSynchronizer diff --git a/amaranth/compat/genlib/roundrobin.py b/amaranth/compat/genlib/roundrobin.py new file mode 100644 index 0000000..138cb11 --- /dev/null +++ b/amaranth/compat/genlib/roundrobin.py @@ -0,0 +1,58 @@ +import warnings + +from ..fhdl.structure import Signal, If, Case +from ..fhdl.module import CompatModule + + +__all__ = ["RoundRobin", "SP_WITHDRAW", "SP_CE"] + +(SP_WITHDRAW, SP_CE) = range(2) + +class CompatRoundRobin(CompatModule): + def __init__(self, n, switch_policy=SP_WITHDRAW): + self.request = Signal(n) + self.grant = Signal(max=max(2, n)) + self.switch_policy = switch_policy + if self.switch_policy == SP_CE: + warnings.warn("instead of `migen.genlib.roundrobin.RoundRobin`, " + "use `amaranth.lib.scheduler.RoundRobin`; note that RoundRobin does not " + "require a policy anymore but to get the same 
behavior as SP_CE you" + "should use an EnableInserter", + DeprecationWarning, stacklevel=1) + self.ce = Signal() + else: + warnings.warn("instead of `migen.genlib.roundrobin.RoundRobin`, " + "use `amaranth.lib.scheduler.RoundRobin`; note that RoundRobin does not " + "require a policy anymore", + DeprecationWarning, stacklevel=1) + + ### + + if n > 1: + cases = {} + for i in range(n): + switch = [] + for j in reversed(range(i+1, i+n)): + t = j % n + switch = [ + If(self.request[t], + self.grant.eq(t) + ).Else( + *switch + ) + ] + if self.switch_policy == SP_WITHDRAW: + case = [If(~self.request[i], *switch)] + else: + case = switch + cases[i] = case + statement = Case(self.grant, cases) + if self.switch_policy == SP_CE: + statement = If(self.ce, statement) + self.sync += statement + else: + self.comb += self.grant.eq(0) + + + +RoundRobin = CompatRoundRobin diff --git a/amaranth/compat/sim/__init__.py b/amaranth/compat/sim/__init__.py new file mode 100644 index 0000000..4c3d063 --- /dev/null +++ b/amaranth/compat/sim/__init__.py @@ -0,0 +1,54 @@ +import functools +import inspect +from collections.abc import Iterable +from ...hdl.cd import ClockDomain +from ...hdl.ir import Fragment +from ...sim import * + + +__all__ = ["run_simulation", "passive"] + + +def run_simulation(fragment_or_module, generators, clocks={"sync": 10}, vcd_name=None, + special_overrides={}): + assert not special_overrides + + if hasattr(fragment_or_module, "get_fragment"): + fragment = fragment_or_module.get_fragment() + else: + fragment = fragment_or_module + + fragment = Fragment.get(fragment, platform=None) + + if not isinstance(generators, dict): + generators = {"sync": generators} + if "sync" not in fragment.domains: + fragment.add_domains(ClockDomain("sync")) + + sim = Simulator(fragment) + for domain, period in clocks.items(): + sim.add_clock(period / 1e9, domain=domain) + for domain, processes in generators.items(): + def wrap(process): + def wrapper(): + yield from process + return 
wrapper + if isinstance(processes, Iterable) and not inspect.isgenerator(processes): + for process in processes: + sim.add_sync_process(wrap(process), domain=domain) + else: + sim.add_sync_process(wrap(processes), domain=domain) + + if vcd_name is not None: + with sim.write_vcd(vcd_name): + sim.run() + else: + sim.run() + + +def passive(generator): + @functools.wraps(generator) + def wrapper(*args, **kwargs): + yield Passive() + yield from generator(*args, **kwargs) + return wrapper diff --git a/amaranth/hdl/__init__.py b/amaranth/hdl/__init__.py new file mode 100644 index 0000000..770fc25 --- /dev/null +++ b/amaranth/hdl/__init__.py @@ -0,0 +1,20 @@ +from .ast import Shape, unsigned, signed +from .ast import Value, Const, C, Mux, Cat, Repl, Array, Signal, ClockSignal, ResetSignal +from .dsl import Module +from .cd import ClockDomain +from .ir import Elaboratable, Fragment, Instance +from .mem import Memory +from .rec import Record +from .xfrm import DomainRenamer, ResetInserter, EnableInserter + + +__all__ = [ + "Shape", "unsigned", "signed", + "Value", "Const", "C", "Mux", "Cat", "Repl", "Array", "Signal", "ClockSignal", "ResetSignal", + "Module", + "ClockDomain", + "Elaboratable", "Fragment", "Instance", + "Memory", + "Record", + "DomainRenamer", "ResetInserter", "EnableInserter", +] diff --git a/amaranth/hdl/ast.py b/amaranth/hdl/ast.py new file mode 100644 index 0000000..5792c84 --- /dev/null +++ b/amaranth/hdl/ast.py @@ -0,0 +1,1775 @@ +from abc import ABCMeta, abstractmethod +import warnings +import functools +from collections import OrderedDict +from collections.abc import Iterable, MutableMapping, MutableSet, MutableSequence +from enum import Enum +from itertools import chain + +from .. 
import tracer +from .._utils import * +from .._unused import * + + +__all__ = [ + "Shape", "signed", "unsigned", + "Value", "Const", "C", "AnyConst", "AnySeq", "Operator", "Mux", "Part", "Slice", "Cat", "Repl", + "Array", "ArrayProxy", + "Signal", "ClockSignal", "ResetSignal", + "UserValue", "ValueCastable", + "Sample", "Past", "Stable", "Rose", "Fell", "Initial", + "Statement", "Switch", + "Property", "Assign", "Assert", "Assume", "Cover", + "ValueKey", "ValueDict", "ValueSet", "SignalKey", "SignalDict", "SignalSet", +] + + +class DUID: + """Deterministic Unique IDentifier.""" + __next_uid = 0 + def __init__(self): + self.duid = DUID.__next_uid + DUID.__next_uid += 1 + + +class Shape: + """Bit width and signedness of a value. + + A ``Shape`` can be constructed using: + * explicit bit width and signedness; + * aliases :func:`signed` and :func:`unsigned`; + * casting from a variety of objects. + + A ``Shape`` can be cast from: + * an integer, where the integer specifies the bit width; + * a range, where the result is wide enough to represent any element of the range, and is + signed if any element of the range is signed; + * an :class:`Enum` with all integer members or :class:`IntEnum`, where the result is wide + enough to represent any member of the enumeration, and is signed if any member of + the enumeration is signed. + + Parameters + ---------- + width : int + The number of bits in the representation, including the sign bit (if any). + signed : bool + If ``False``, the value is unsigned. If ``True``, the value is signed two's complement. 
+ """ + def __init__(self, width=1, signed=False): + if not isinstance(width, int) or width < 0: + raise TypeError("Width must be a non-negative integer, not {!r}" + .format(width)) + self.width = width + self.signed = signed + + def __iter__(self): + return iter((self.width, self.signed)) + + @staticmethod + def cast(obj, *, src_loc_at=0): + if isinstance(obj, Shape): + return obj + if isinstance(obj, int): + return Shape(obj) + if isinstance(obj, tuple): + width, signed = obj + warnings.warn("instead of `{tuple}`, use `{constructor}({width})`" + .format(constructor="signed" if signed else "unsigned", width=width, + tuple=obj), + DeprecationWarning, stacklevel=2 + src_loc_at) + return Shape(width, signed) + if isinstance(obj, range): + if len(obj) == 0: + return Shape(0, obj.start < 0) + signed = obj.start < 0 or (obj.stop - obj.step) < 0 + width = max(bits_for(obj.start, signed), + bits_for(obj.stop - obj.step, signed)) + return Shape(width, signed) + if isinstance(obj, type) and issubclass(obj, Enum): + min_value = min(member.value for member in obj) + max_value = max(member.value for member in obj) + if not isinstance(min_value, int) or not isinstance(max_value, int): + raise TypeError("Only enumerations with integer values can be used " + "as value shapes") + signed = min_value < 0 or max_value < 0 + width = max(bits_for(min_value, signed), bits_for(max_value, signed)) + return Shape(width, signed) + raise TypeError("Object {!r} cannot be used as value shape".format(obj)) + + def __repr__(self): + if self.signed: + return "signed({})".format(self.width) + else: + return "unsigned({})".format(self.width) + + def __eq__(self, other): + if isinstance(other, tuple) and len(other) == 2: + width, signed = other + if isinstance(width, int) and isinstance(signed, bool): + return self.width == width and self.signed == signed + else: + raise TypeError("Shapes may be compared with other Shapes and (int, bool) tuples, " + "not {!r}" + .format(other)) + if not 
isinstance(other, Shape): + raise TypeError("Shapes may be compared with other Shapes and (int, bool) tuples, " + "not {!r}" + .format(other)) + return self.width == other.width and self.signed == other.signed + + +def unsigned(width): + """Shorthand for ``Shape(width, signed=False)``.""" + return Shape(width, signed=False) + + +def signed(width): + """Shorthand for ``Shape(width, signed=True)``.""" + return Shape(width, signed=True) + + +class Value(metaclass=ABCMeta): + @staticmethod + def cast(obj): + """Converts ``obj`` to an Amaranth value. + + Booleans and integers are wrapped into a :class:`Const`. Enumerations whose members are + all integers are converted to a :class:`Const` with a shape that fits every member. + """ + if isinstance(obj, Value): + return obj + if isinstance(obj, int): + return Const(obj) + if isinstance(obj, Enum): + return Const(obj.value, Shape.cast(type(obj))) + if isinstance(obj, ValueCastable): + return obj.as_value() + raise TypeError("Object {!r} cannot be converted to an Amaranth value".format(obj)) + + def __init__(self, *, src_loc_at=0): + super().__init__() + self.src_loc = tracer.get_src_loc(1 + src_loc_at) + + def __bool__(self): + raise TypeError("Attempted to convert Amaranth value to Python boolean") + + def __invert__(self): + return Operator("~", [self]) + def __neg__(self): + return Operator("-", [self]) + + def __add__(self, other): + return Operator("+", [self, other]) + def __radd__(self, other): + return Operator("+", [other, self]) + def __sub__(self, other): + return Operator("-", [self, other]) + def __rsub__(self, other): + return Operator("-", [other, self]) + + def __mul__(self, other): + return Operator("*", [self, other]) + def __rmul__(self, other): + return Operator("*", [other, self]) + + def __check_divisor(self): + width, signed = self.shape() + if signed: + # Python's division semantics and Verilog's division semantics differ for negative + # divisors (Python uses div/mod, Verilog uses quo/rem); for 
now, avoid the issue + # completely by prohibiting such division operations. + raise NotImplementedError("Division by a signed value is not supported") + def __mod__(self, other): + other = Value.cast(other) + other.__check_divisor() + return Operator("%", [self, other]) + def __rmod__(self, other): + self.__check_divisor() + return Operator("%", [other, self]) + def __floordiv__(self, other): + other = Value.cast(other) + other.__check_divisor() + return Operator("//", [self, other]) + def __rfloordiv__(self, other): + self.__check_divisor() + return Operator("//", [other, self]) + + def __check_shamt(self): + width, signed = self.shape() + if signed: + # Neither Python nor HDLs implement shifts by negative values; prohibit any shifts + # by a signed value to make sure the shift amount can always be interpreted as + # an unsigned value. + raise TypeError("Shift amount must be unsigned") + def __lshift__(self, other): + other = Value.cast(other) + other.__check_shamt() + return Operator("<<", [self, other]) + def __rlshift__(self, other): + self.__check_shamt() + return Operator("<<", [other, self]) + def __rshift__(self, other): + other = Value.cast(other) + other.__check_shamt() + return Operator(">>", [self, other]) + def __rrshift__(self, other): + self.__check_shamt() + return Operator(">>", [other, self]) + + def __and__(self, other): + return Operator("&", [self, other]) + def __rand__(self, other): + return Operator("&", [other, self]) + def __xor__(self, other): + return Operator("^", [self, other]) + def __rxor__(self, other): + return Operator("^", [other, self]) + def __or__(self, other): + return Operator("|", [self, other]) + def __ror__(self, other): + return Operator("|", [other, self]) + + def __eq__(self, other): + return Operator("==", [self, other]) + def __ne__(self, other): + return Operator("!=", [self, other]) + def __lt__(self, other): + return Operator("<", [self, other]) + def __le__(self, other): + return Operator("<=", [self, other]) + 
def __gt__(self, other): + return Operator(">", [self, other]) + def __ge__(self, other): + return Operator(">=", [self, other]) + + def __abs__(self): + width, signed = self.shape() + if signed: + return Mux(self >= 0, self, -self) + else: + return self + + def __len__(self): + return self.shape().width + + def __getitem__(self, key): + n = len(self) + if isinstance(key, int): + if key not in range(-n, n): + raise IndexError(f"Index {key} is out of bounds for a {n}-bit value") + if key < 0: + key += n + return Slice(self, key, key + 1) + elif isinstance(key, slice): + start, stop, step = key.indices(n) + if step != 1: + return Cat(self[i] for i in range(start, stop, step)) + return Slice(self, start, stop) + else: + raise TypeError("Cannot index value with {}".format(repr(key))) + + def as_unsigned(self): + """Conversion to unsigned. + + Returns + ------- + Value, out + This ``Value`` reinterpreted as a unsigned integer. + """ + return Operator("u", [self]) + + def as_signed(self): + """Conversion to signed. + + Returns + ------- + Value, out + This ``Value`` reinterpreted as a signed integer. + """ + return Operator("s", [self]) + + def bool(self): + """Conversion to boolean. + + Returns + ------- + Value, out + ``1`` if any bits are set, ``0`` otherwise. + """ + return Operator("b", [self]) + + def any(self): + """Check if any bits are ``1``. + + Returns + ------- + Value, out + ``1`` if any bits are set, ``0`` otherwise. + """ + return Operator("r|", [self]) + + def all(self): + """Check if all bits are ``1``. + + Returns + ------- + Value, out + ``1`` if all bits are set, ``0`` otherwise. + """ + return Operator("r&", [self]) + + def xor(self): + """Compute pairwise exclusive-or of every bit. + + Returns + ------- + Value, out + ``1`` if an odd number of bits are set, ``0`` if an even number of bits are set. + """ + return Operator("r^", [self]) + + def implies(premise, conclusion): + """Implication. 
+ + Returns + ------- + Value, out + ``0`` if ``premise`` is true and ``conclusion`` is not, ``1`` otherwise. + """ + return ~premise | conclusion + + def bit_select(self, offset, width): + """Part-select with bit granularity. + + Selects a constant width but variable offset part of a ``Value``, such that successive + parts overlap by all but 1 bit. + + Parameters + ---------- + offset : Value, int + Index of first selected bit. + width : int + Number of selected bits. + + Returns + ------- + Part, out + Selected part of the ``Value`` + """ + offset = Value.cast(offset) + if type(offset) is Const and isinstance(width, int): + return self[offset.value:offset.value + width] + return Part(self, offset, width, stride=1, src_loc_at=1) + + def word_select(self, offset, width): + """Part-select with word granularity. + + Selects a constant width but variable offset part of a ``Value``, such that successive + parts do not overlap. + + Parameters + ---------- + offset : Value, int + Index of first selected word. + width : int + Number of selected bits. + + Returns + ------- + Part, out + Selected part of the ``Value`` + """ + offset = Value.cast(offset) + if type(offset) is Const and isinstance(width, int): + return self[offset.value * width:(offset.value + 1) * width] + return Part(self, offset, width, stride=width, src_loc_at=1) + + def matches(self, *patterns): + """Pattern matching. + + Matches against a set of patterns, which may be integers or bit strings, recognizing + the same grammar as ``Case()``. + + Parameters + ---------- + patterns : int or str + Patterns to match against. + + Returns + ------- + Value, out + ``1`` if any pattern matches the value, ``0`` otherwise. 
+ """ + matches = [] + for pattern in patterns: + if not isinstance(pattern, (int, str, Enum)): + raise SyntaxError("Match pattern must be an integer, a string, or an enumeration, " + "not {!r}" + .format(pattern)) + if isinstance(pattern, str) and any(bit not in "01- \t" for bit in pattern): + raise SyntaxError("Match pattern '{}' must consist of 0, 1, and - (don't care) " + "bits, and may include whitespace" + .format(pattern)) + if (isinstance(pattern, str) and + len("".join(pattern.split())) != len(self)): + raise SyntaxError("Match pattern '{}' must have the same width as match value " + "(which is {})" + .format(pattern, len(self))) + if isinstance(pattern, int) and bits_for(pattern) > len(self): + warnings.warn("Match pattern '{:b}' is wider than match value " + "(which has width {}); comparison will never be true" + .format(pattern, len(self)), + SyntaxWarning, stacklevel=3) + continue + if isinstance(pattern, str): + pattern = "".join(pattern.split()) # remove whitespace + mask = int(pattern.replace("0", "1").replace("-", "0"), 2) + pattern = int(pattern.replace("-", "0"), 2) + matches.append((self & mask) == pattern) + elif isinstance(pattern, int): + matches.append(self == pattern) + elif isinstance(pattern, Enum): + matches.append(self == pattern.value) + else: + assert False + if not matches: + return Const(0) + elif len(matches) == 1: + return matches[0] + else: + return Cat(*matches).any() + + def shift_left(self, amount): + """Shift left by constant amount. + + Parameters + ---------- + amount : int + Amount to shift by. + + Returns + ------- + Value, out + If the amount is positive, the input shifted left. Otherwise, the input shifted right. 
+ """ + if not isinstance(amount, int): + raise TypeError("Shift amount must be an integer, not {!r}".format(amount)) + if amount < 0: + return self.shift_right(-amount) + if self.shape().signed: + return Cat(Const(0, amount), self).as_signed() + else: + return Cat(Const(0, amount), self) # unsigned + + def shift_right(self, amount): + """Shift right by constant amount. + + Parameters + ---------- + amount : int + Amount to shift by. + + Returns + ------- + Value, out + If the amount is positive, the input shifted right. Otherwise, the input shifted left. + """ + if not isinstance(amount, int): + raise TypeError("Shift amount must be an integer, not {!r}".format(amount)) + if amount < 0: + return self.shift_left(-amount) + if self.shape().signed: + return self[amount:].as_signed() + else: + return self[amount:] # unsigned + + def rotate_left(self, amount): + """Rotate left by constant amount. + + Parameters + ---------- + amount : int + Amount to rotate by. + + Returns + ------- + Value, out + If the amount is positive, the input rotated left. Otherwise, the input rotated right. + """ + if not isinstance(amount, int): + raise TypeError("Rotate amount must be an integer, not {!r}".format(amount)) + amount %= len(self) + return Cat(self[-amount:], self[:-amount]) # meow :3 + + def rotate_right(self, amount): + """Rotate right by constant amount. + + Parameters + ---------- + amount : int + Amount to rotate by. + + Returns + ------- + Value, out + If the amount is positive, the input rotated right. Otherwise, the input rotated right. + """ + if not isinstance(amount, int): + raise TypeError("Rotate amount must be an integer, not {!r}".format(amount)) + amount %= len(self) + return Cat(self[amount:], self[:amount]) + + def eq(self, value): + """Assignment. + + Parameters + ---------- + value : Value, in + Value to be assigned. + + Returns + ------- + Assign + Assignment statement that can be used in combinatorial or synchronous context. 
+ """ + return Assign(self, value, src_loc_at=1) + + @abstractmethod + def shape(self): + """Bit width and signedness of a value. + + Returns + ------- + Shape + See :class:`Shape`. + + Examples + -------- + >>> Signal(8).shape() + Shape(width=8, signed=False) + >>> Const(0xaa).shape() + Shape(width=8, signed=False) + """ + pass # :nocov: + + def _lhs_signals(self): + raise TypeError("Value {!r} cannot be used in assignments".format(self)) + + @abstractmethod + def _rhs_signals(self): + pass # :nocov: + + def _as_const(self): + raise TypeError("Value {!r} cannot be evaluated as constant".format(self)) + + __hash__ = None + + +@final +class Const(Value): + """A constant, literal integer value. + + Parameters + ---------- + value : int + shape : int or tuple or None + Either an integer ``width`` or a tuple ``(width, signed)`` specifying the number of bits + in this constant and whether it is signed (can represent negative values). + ``shape`` defaults to the minimum possible width and signedness of ``value``. + + Attributes + ---------- + width : int + signed : bool + """ + src_loc = None + + @staticmethod + def normalize(value, shape): + width, signed = shape + mask = (1 << width) - 1 + value &= mask + if signed and value >> (width - 1): + value |= ~mask + return value + + def __init__(self, value, shape=None, *, src_loc_at=0): + # We deliberately do not call Value.__init__ here. 
+ self.value = int(value) + if shape is None: + shape = Shape(bits_for(self.value), signed=self.value < 0) + elif isinstance(shape, int): + shape = Shape(shape, signed=self.value < 0) + else: + shape = Shape.cast(shape, src_loc_at=1 + src_loc_at) + self.width, self.signed = shape + self.value = self.normalize(self.value, shape) + + def shape(self): + return Shape(self.width, self.signed) + + def _rhs_signals(self): + return SignalSet() + + def _as_const(self): + return self.value + + def __repr__(self): + return "(const {}'{}d{})".format(self.width, "s" if self.signed else "", self.value) + + +C = Const # shorthand + + +class AnyValue(Value, DUID): + def __init__(self, shape, *, src_loc_at=0): + super().__init__(src_loc_at=src_loc_at) + self.width, self.signed = Shape.cast(shape, src_loc_at=1 + src_loc_at) + if not isinstance(self.width, int) or self.width < 0: + raise TypeError("Width must be a non-negative integer, not {!r}" + .format(self.width)) + + def shape(self): + return Shape(self.width, self.signed) + + def _rhs_signals(self): + return SignalSet() + + +@final +class AnyConst(AnyValue): + def __repr__(self): + return "(anyconst {}'{})".format(self.width, "s" if self.signed else "") + + +@final +class AnySeq(AnyValue): + def __repr__(self): + return "(anyseq {}'{})".format(self.width, "s" if self.signed else "") + + +@final +class Operator(Value): + def __init__(self, operator, operands, *, src_loc_at=0): + super().__init__(src_loc_at=1 + src_loc_at) + self.operator = operator + self.operands = [Value.cast(op) for op in operands] + + def shape(self): + def _bitwise_binary_shape(a_shape, b_shape): + a_bits, a_sign = a_shape + b_bits, b_sign = b_shape + if not a_sign and not b_sign: + # both operands unsigned + return Shape(max(a_bits, b_bits), False) + elif a_sign and b_sign: + # both operands signed + return Shape(max(a_bits, b_bits), True) + elif not a_sign and b_sign: + # first operand unsigned (add sign bit), second operand signed + return 
Shape(max(a_bits + 1, b_bits), True) + else: + # first signed, second operand unsigned (add sign bit) + return Shape(max(a_bits, b_bits + 1), True) + + op_shapes = list(map(lambda x: x.shape(), self.operands)) + if len(op_shapes) == 1: + (a_width, a_signed), = op_shapes + if self.operator in ("+", "~"): + return Shape(a_width, a_signed) + if self.operator == "-": + return Shape(a_width + 1, True) + if self.operator in ("b", "r|", "r&", "r^"): + return Shape(1, False) + if self.operator == "u": + return Shape(a_width, False) + if self.operator == "s": + return Shape(a_width, True) + elif len(op_shapes) == 2: + (a_width, a_signed), (b_width, b_signed) = op_shapes + if self.operator in ("+", "-"): + width, signed = _bitwise_binary_shape(*op_shapes) + return Shape(width + 1, signed) + if self.operator == "*": + return Shape(a_width + b_width, a_signed or b_signed) + if self.operator in ("//", "%"): + assert not b_signed + return Shape(a_width, a_signed) + if self.operator in ("<", "<=", "==", "!=", ">", ">="): + return Shape(1, False) + if self.operator in ("&", "^", "|"): + return _bitwise_binary_shape(*op_shapes) + if self.operator == "<<": + assert not b_signed + return Shape(a_width + 2 ** b_width - 1, a_signed) + if self.operator == ">>": + assert not b_signed + return Shape(a_width, a_signed) + elif len(op_shapes) == 3: + if self.operator == "m": + s_shape, a_shape, b_shape = op_shapes + return _bitwise_binary_shape(a_shape, b_shape) + raise NotImplementedError("Operator {}/{} not implemented" + .format(self.operator, len(op_shapes))) # :nocov: + + def _rhs_signals(self): + return union(op._rhs_signals() for op in self.operands) + + def __repr__(self): + return "({} {})".format(self.operator, " ".join(map(repr, self.operands))) + + +def Mux(sel, val1, val0): + """Choose between two values. + + Parameters + ---------- + sel : Value, in + Selector. + val1 : Value, in + val0 : Value, in + Input values. + + Returns + ------- + Value, out + Output ``Value``. 
If ``sel`` is asserted, the Mux returns ``val1``, else ``val0``. + """ + return Operator("m", [sel, val1, val0]) + + +@final +class Slice(Value): + def __init__(self, value, start, stop, *, src_loc_at=0): + if not isinstance(start, int): + raise TypeError("Slice start must be an integer, not {!r}".format(start)) + if not isinstance(stop, int): + raise TypeError("Slice stop must be an integer, not {!r}".format(stop)) + + n = len(value) + if start not in range(-(n+1), n+1): + raise IndexError("Cannot start slice {} bits into {}-bit value".format(start, n)) + if start < 0: + start += n + if stop not in range(-(n+1), n+1): + raise IndexError("Cannot stop slice {} bits into {}-bit value".format(stop, n)) + if stop < 0: + stop += n + if start > stop: + raise IndexError("Slice start {} must be less than slice stop {}".format(start, stop)) + + super().__init__(src_loc_at=src_loc_at) + self.value = Value.cast(value) + self.start = int(start) + self.stop = int(stop) + + def shape(self): + return Shape(self.stop - self.start) + + def _lhs_signals(self): + return self.value._lhs_signals() + + def _rhs_signals(self): + return self.value._rhs_signals() + + def __repr__(self): + return "(slice {} {}:{})".format(repr(self.value), self.start, self.stop) + + +@final +class Part(Value): + def __init__(self, value, offset, width, stride=1, *, src_loc_at=0): + if not isinstance(width, int) or width < 0: + raise TypeError("Part width must be a non-negative integer, not {!r}".format(width)) + if not isinstance(stride, int) or stride <= 0: + raise TypeError("Part stride must be a positive integer, not {!r}".format(stride)) + + super().__init__(src_loc_at=src_loc_at) + self.value = value + self.offset = Value.cast(offset) + self.width = width + self.stride = stride + + def shape(self): + return Shape(self.width) + + def _lhs_signals(self): + return self.value._lhs_signals() + + def _rhs_signals(self): + return self.value._rhs_signals() | self.offset._rhs_signals() + + def __repr__(self): + 
return "(part {} {} {} {})".format(repr(self.value), repr(self.offset), + self.width, self.stride) + + +@final +class Cat(Value): + """Concatenate values. + + Form a compound ``Value`` from several smaller ones by concatenation. + The first argument occupies the lower bits of the result. + The return value can be used on either side of an assignment, that + is, the concatenated value can be used as an argument on the RHS or + as a target on the LHS. If it is used on the LHS, it must solely + consist of ``Signal`` s, slices of ``Signal`` s, and other concatenations + meeting these properties. The bit length of the return value is the sum of + the bit lengths of the arguments:: + + len(Cat(args)) == sum(len(arg) for arg in args) + + Parameters + ---------- + *args : Values or iterables of Values, inout + ``Value`` s to be concatenated. + + Returns + ------- + Value, inout + Resulting ``Value`` obtained by concatentation. + """ + def __init__(self, *args, src_loc_at=0): + super().__init__(src_loc_at=src_loc_at) + self.parts = [Value.cast(v) for v in flatten(args)] + + def shape(self): + return Shape(sum(len(part) for part in self.parts)) + + def _lhs_signals(self): + return union((part._lhs_signals() for part in self.parts), start=SignalSet()) + + def _rhs_signals(self): + return union((part._rhs_signals() for part in self.parts), start=SignalSet()) + + def _as_const(self): + value = 0 + for part in reversed(self.parts): + value <<= len(part) + value |= part._as_const() + return value + + def __repr__(self): + return "(cat {})".format(" ".join(map(repr, self.parts))) + + +@final +class Repl(Value): + """Replicate a value + + An input value is replicated (repeated) several times + to be used on the RHS of assignments:: + + len(Repl(s, n)) == len(s) * n + + Parameters + ---------- + value : Value, in + Input value to be replicated. + count : int + Number of replications. + + Returns + ------- + Repl, out + Replicated value. 
+ """ + def __init__(self, value, count, *, src_loc_at=0): + if not isinstance(count, int) or count < 0: + raise TypeError("Replication count must be a non-negative integer, not {!r}" + .format(count)) + + super().__init__(src_loc_at=src_loc_at) + self.value = Value.cast(value) + self.count = count + + def shape(self): + return Shape(len(self.value) * self.count) + + def _rhs_signals(self): + return self.value._rhs_signals() + + def __repr__(self): + return "(repl {!r} {})".format(self.value, self.count) + + +# @final +class Signal(Value, DUID): + """A varying integer value. + + Parameters + ---------- + shape : ``Shape``-castable object or None + Specification for the number of bits in this ``Signal`` and its signedness (whether it + can represent negative values). See ``Shape.cast`` for details. + If not specified, ``shape`` defaults to 1-bit and non-signed. + name : str + Name hint for this signal. If ``None`` (default) the name is inferred from the variable + name this ``Signal`` is assigned to. + reset : int or integral Enum + Reset (synchronous) or default (combinatorial) value. + When this ``Signal`` is assigned to in synchronous context and the corresponding clock + domain is reset, the ``Signal`` assumes the given value. When this ``Signal`` is unassigned + in combinatorial context (due to conditional assignments not being taken), the ``Signal`` + assumes its ``reset`` value. Defaults to 0. + reset_less : bool + If ``True``, do not generate reset logic for this ``Signal`` in synchronous statements. + The ``reset`` value is only used as a combinatorial default or as the initial value. + Defaults to ``False``. + attrs : dict + Dictionary of synthesis attributes. + decoder : function or Enum + A function converting integer signal values to human-readable strings (e.g. FSM state + names). 
If an ``Enum`` subclass is passed, it is concisely decoded using format string + ``"{0.name:}/{0.value:}"``, or a number if the signal value is not a member of + the enumeration. + + Attributes + ---------- + width : int + signed : bool + name : str + reset : int + reset_less : bool + attrs : dict + decoder : function + """ + + def __init__(self, shape=None, *, name=None, reset=0, reset_less=False, + attrs=None, decoder=None, src_loc_at=0): + super().__init__(src_loc_at=src_loc_at) + + if name is not None and not isinstance(name, str): + raise TypeError("Name must be a string, not {!r}".format(name)) + self.name = name or tracer.get_var_name(depth=2 + src_loc_at, default="$signal") + + if shape is None: + shape = unsigned(1) + self.width, self.signed = Shape.cast(shape, src_loc_at=1 + src_loc_at) + + if isinstance(reset, Enum): + reset = reset.value + if not isinstance(reset, int): + raise TypeError("Reset value has to be an int or an integral Enum") + + reset_width = bits_for(reset, self.signed) + if reset != 0 and reset_width > self.width: + warnings.warn("Reset value {!r} requires {} bits to represent, but the signal " + "only has {} bits" + .format(reset, reset_width, self.width), + SyntaxWarning, stacklevel=2 + src_loc_at) + + self.reset = reset + self.reset_less = bool(reset_less) + + self.attrs = OrderedDict(() if attrs is None else attrs) + + if decoder is None and isinstance(shape, type) and issubclass(shape, Enum): + decoder = shape + if isinstance(decoder, type) and issubclass(decoder, Enum): + def enum_decoder(value): + try: + return "{0.name:}/{0.value:}".format(decoder(value)) + except ValueError: + return str(value) + self.decoder = enum_decoder + self._enum_class = decoder + else: + self.decoder = decoder + self._enum_class = None + + # Not a @classmethod because amaranth.compat requires it. + @staticmethod + def like(other, *, name=None, name_suffix=None, src_loc_at=0, **kwargs): + """Create Signal based on another. 
+ + Parameters + ---------- + other : Value + Object to base this Signal on. + """ + if name is not None: + new_name = str(name) + elif name_suffix is not None: + new_name = other.name + str(name_suffix) + else: + new_name = tracer.get_var_name(depth=2 + src_loc_at, default="$like") + kw = dict(shape=Value.cast(other).shape(), name=new_name) + if isinstance(other, Signal): + kw.update(reset=other.reset, reset_less=other.reset_less, + attrs=other.attrs, decoder=other.decoder) + kw.update(kwargs) + return Signal(**kw, src_loc_at=1 + src_loc_at) + + def shape(self): + return Shape(self.width, self.signed) + + def _lhs_signals(self): + return SignalSet((self,)) + + def _rhs_signals(self): + return SignalSet((self,)) + + def __repr__(self): + return "(sig {})".format(self.name) + + +@final +class ClockSignal(Value): + """Clock signal for a clock domain. + + Any ``ClockSignal`` is equivalent to ``cd.clk`` for a clock domain with the corresponding name. + All of these signals ultimately refer to the same signal, but they can be manipulated + independently of the clock domain, even before the clock domain is created. + + Parameters + ---------- + domain : str + Clock domain to obtain a clock signal for. Defaults to ``"sync"``. + """ + def __init__(self, domain="sync", *, src_loc_at=0): + super().__init__(src_loc_at=src_loc_at) + if not isinstance(domain, str): + raise TypeError("Clock domain name must be a string, not {!r}".format(domain)) + if domain == "comb": + raise ValueError("Domain '{}' does not have a clock".format(domain)) + self.domain = domain + + def shape(self): + return Shape(1) + + def _lhs_signals(self): + return SignalSet((self,)) + + def _rhs_signals(self): + raise NotImplementedError("ClockSignal must be lowered to a concrete signal") # :nocov: + + def __repr__(self): + return "(clk {})".format(self.domain) + + +@final +class ResetSignal(Value): + """Reset signal for a clock domain. 
class Array(MutableSequence):
    """Addressable multiplexer.

    An array is similar to a ``list`` that can also be indexed by ``Value``s;
    indexing by an integer or a slice works the same as for Python lists, but
    indexing by a ``Value`` results in a proxy.

    The array proxy can be used as an ordinary ``Value``, i.e. participate in
    calculations and assignments, provided that all elements of the array are
    values. The array proxy also supports attribute access and further indexing,
    each returning another array proxy; this means that the results of indexing
    into arrays, arrays of records, and arrays of arrays can all be used as
    first-class values.

    It is an error to change an array or any of its elements after an array proxy
    was created. Changing the array directly will raise an exception. However, it
    is not possible to detect the elements being modified; if an element's
    attribute or element is modified after the proxy for it has been created,
    the proxy will refer to stale data.

    Examples
    --------

    Simple array::

        gpios = Array(Signal() for _ in range(10))
        with m.If(bus.we):
            m.d.sync += gpios[bus.addr].eq(bus.w_data)
        with m.Else():
            m.d.sync += bus.r_data.eq(gpios[bus.addr])

    Multidimensional array::

        mult = Array(Array(x * y for y in range(10)) for x in range(10))
        a = Signal(range(10))
        b = Signal(range(10))
        r = Signal(8)
        m.d.comb += r.eq(mult[a][b])

    Array of records::

        layout = [
            ("r_data", 16),
            ("r_en",   1),
        ]
        buses  = Array(Record(layout) for busno in range(4))
        master = Record(layout)
        m.d.comb += [
            buses[sel].r_en.eq(master.r_en),
            master.r_data.eq(buses[sel].r_data),
        ]
    """
    def __init__(self, iterable=()):
        self._inner = list(iterable)
        # Location of the first Value-indexing, for the mutation error message.
        self._proxy_at = None
        self._mutable = True

    def __getitem__(self, index):
        if not isinstance(index, Value):
            return self._inner[index]
        # Indexing by a Value freezes the array: the proxy captures its contents.
        if self._mutable:
            self._proxy_at = tracer.get_src_loc()
            self._mutable = False
        return ArrayProxy(self, index)

    def __len__(self):
        return len(self._inner)

    def _check_mutability(self):
        if not self._mutable:
            raise ValueError("Array can no longer be mutated after it was indexed with a value "
                             "at {}:{}".format(*self._proxy_at))

    def __setitem__(self, index, value):
        self._check_mutability()
        self._inner[index] = value

    def __delitem__(self, index):
        self._check_mutability()
        del self._inner[index]

    def insert(self, index, value):
        self._check_mutability()
        self._inner.insert(index, value)

    def __repr__(self):
        contents = ", ".join(map(repr, self._inner))
        return "(array{} [{}])".format(" mutable" if self._mutable else "", contents)


@final
class ArrayProxy(Value):
    """Value-indexed view of an ``Array``; behaves like a mux tree over its elements."""
    def __init__(self, elems, index, *, src_loc_at=0):
        super().__init__(src_loc_at=1 + src_loc_at)
        self.elems = elems
        self.index = Value.cast(index)

    def __getattr__(self, attr):
        return ArrayProxy([getattr(elem, attr) for elem in self.elems], self.index)

    def __getitem__(self, index):
        return ArrayProxy([        elem[index] for elem in self.elems], self.index)

    def _iter_as_values(self):
        return (Value.cast(elem) for elem in self.elems)

    def shape(self):
        unsigned_width = signed_width = 0
        has_unsigned = has_signed = False
        for elem_width, elem_signed in (elem.shape() for elem in self._iter_as_values()):
            if elem_signed:
                has_signed = True
                signed_width = max(signed_width, elem_width)
            else:
                has_unsigned = True
                unsigned_width = max(unsigned_width, elem_width)
        # The shape of the proxy must preserve the mathematical value of the array
        # elements; shape-wise, a proxy is identical to an equivalent mux tree. If
        # the array mixes signed and unsigned values, every unsigned value must be
        # zero-extended by at least one bit.
        if has_signed and has_unsigned and unsigned_width >= signed_width:
            # Mixed signedness, and at least one unsigned value would not be
            # zero-extended otherwise.
            return signed(unsigned_width + 1)
        else:
            # Uniform signedness, or every unsigned value is already zero-extended.
            return Shape(max(unsigned_width, signed_width), has_signed)

    def _lhs_signals(self):
        return union((elem._lhs_signals() for elem in self._iter_as_values()),
                     start=SignalSet())

    def _rhs_signals(self):
        elem_signals = union((elem._rhs_signals() for elem in self._iter_as_values()),
                             start=SignalSet())
        return self.index._rhs_signals() | elem_signals

    def __repr__(self):
        return "(proxy (array [{}]) {!r})".format(", ".join(map(repr, self.elems)), self.index)
# TODO(amaranth-0.4): remove
class UserValue(Value):
    """Value with custom lowering.

    A ``UserValue`` is a value whose precise representation does not have to be
    immediately known, which is useful in certain metaprogramming scenarios.
    Instead of providing fixed semantics upfront, it is kept abstract for as long
    as possible, only being lowered to a concrete Amaranth value when required.

    Note that the ``lower`` method will only be called once; this is necessary to
    ensure that Amaranth's view of representation of all values stays internally
    consistent. If the class deriving from ``UserValue`` is mutable, then it must
    ensure that after ``lower`` is called, it is not mutated in a way that changes
    its representation.

    The following is an incomplete list of actions that, when applied to an
    ``UserValue`` directly or indirectly, will cause it to be lowered, provided
    as an illustrative reference:
        * Querying the shape using ``.shape()`` or ``len()``;
        * Creating a similarly shaped signal using ``Signal.like``;
        * Indexing or iterating through individual bits;
        * Adding an assignment to the value to a ``Module`` using ``m.d.<domain> +=``.
    """
    @deprecated("instead of `UserValue`, use `ValueCastable`", stacklevel=3)
    def __init__(self, *, src_loc_at=0):
        super().__init__(src_loc_at=1 + src_loc_at)
        self.__lowered = None

    @abstractmethod
    def lower(self):
        """Conversion to a concrete representation."""
        pass # :nocov:

    def _lazy_lower(self):
        # Memoize the lowering; chase through nested UserValues so the cached
        # result is always a plain Value.
        if self.__lowered is None:
            lowered = self.lower()
            if isinstance(lowered, UserValue):
                lowered = lowered._lazy_lower()
            self.__lowered = Value.cast(lowered)
        return self.__lowered

    def shape(self):
        return self._lazy_lower().shape()

    def _lhs_signals(self):
        return self._lazy_lower()._lhs_signals()

    def _rhs_signals(self):
        return self._lazy_lower()._rhs_signals()


class ValueCastable:
    """Base class for classes which can be cast to Values.

    A ``ValueCastable`` can be cast to ``Value``, meaning its precise
    representation does not have to be immediately known. This is useful in
    certain metaprogramming scenarios. Instead of providing fixed semantics
    upfront, it is kept abstract for as long as possible, only being cast to
    a concrete Amaranth value when required.

    Note that it is necessary to ensure that Amaranth's view of representation of
    all values stays internally consistent. The class deriving from
    ``ValueCastable`` must decorate the ``as_value`` method with the
    ``lowermethod`` decorator, which ensures that all calls to ``as_value``
    return the same ``Value`` representation. If the class deriving from
    ``ValueCastable`` is mutable, it is up to the user to ensure that it is not
    mutated in a way that changes its representation after the first call to
    ``as_value``.
    """
    def __new__(cls, *args, **kwargs):
        self = super().__new__(cls)
        # Enforce the subclass contract at construction time rather than at the
        # first (possibly much later) cast.
        if not hasattr(self, "as_value"):
            raise TypeError(f"Class '{cls.__name__}' deriving from `ValueCastable` must override "
                            "the `as_value` method")
        if not hasattr(self.as_value, "_ValueCastable__memoized"):
            raise TypeError(f"Class '{cls.__name__}' deriving from `ValueCastable` must decorate "
                            "the `as_value` method with the `ValueCastable.lowermethod` decorator")
        return self

    @staticmethod
    def lowermethod(func):
        """Decorator to memoize lowering methods.

        Ensures the decorated method is called only once, with subsequent method
        calls returning the object returned by the first method call.

        This decorator is required to decorate the ``as_value`` method of
        ``ValueCastable`` subclasses. This is to ensure that Amaranth's view of
        representation of all values stays internally consistent.
        """
        @functools.wraps(func)
        def wrapper_memoized(self, *args, **kwargs):
            # Use `in self.__dict__` instead of `hasattr` to avoid interfering
            # with custom `__getattr__` implementations.
            if "_ValueCastable__lowered_to" not in self.__dict__:
                self.__lowered_to = func(self, *args, **kwargs)
            return self.__lowered_to
        wrapper_memoized.__memoized = True
        return wrapper_memoized


@final
class Sample(Value):
    """Value from the past.

    A ``Sample`` of an expression is equal to the value of the expression
    ``clocks`` clock edges of the ``domain`` clock back. If that moment is before
    the beginning of time, it is equal to the value of the expression calculated
    as if each signal had its reset value.
    """
    def __init__(self, expr, clocks, domain, *, src_loc_at=0):
        super().__init__(src_loc_at=1 + src_loc_at)
        self.value = Value.cast(expr)
        self.clocks = int(clocks)
        self.domain = domain
        if not isinstance(self.value, (Const, Signal, ClockSignal, ResetSignal, Initial)):
            raise TypeError(f"Sampled value must be a signal or a constant, not {self.value!r}")
        if self.clocks < 0:
            raise ValueError(f"Cannot sample a value {-self.clocks} cycles in the future")
        if not (self.domain is None or isinstance(self.domain, str)):
            raise TypeError(f"Domain name must be a string or None, not {self.domain!r}")

    def shape(self):
        return self.value.shape()

    def _rhs_signals(self):
        return SignalSet((self,))

    def __repr__(self):
        return "(sample {!r} @ {}[{}])".format(
            self.value, "<default>" if self.domain is None else self.domain, self.clocks)


def Past(expr, clocks=1, domain=None):
    """Value of ``expr`` ``clocks`` edges ago."""
    return Sample(expr, clocks, domain)


def Stable(expr, clocks=0, domain=None):
    """``1`` if ``expr`` did not change across the last clock edge."""
    return Sample(expr, clocks + 1, domain) == Sample(expr, clocks, domain)


def Rose(expr, clocks=0, domain=None):
    """``1`` if ``expr`` went from ``0`` to ``1`` across the last clock edge."""
    return ~Sample(expr, clocks + 1, domain) & Sample(expr, clocks, domain)


def Fell(expr, clocks=0, domain=None):
    """``1`` if ``expr`` went from ``1`` to ``0`` across the last clock edge."""
    return Sample(expr, clocks + 1, domain) & ~Sample(expr, clocks, domain)


@final
class Initial(Value):
    """Start indicator, for model checking.

    An ``Initial`` signal is ``1`` at the first cycle of model checking, and
    ``0`` at any other.
    """
    def __init__(self, *, src_loc_at=0):
        super().__init__(src_loc_at=src_loc_at)

    def shape(self):
        return Shape(1)

    def _rhs_signals(self):
        return SignalSet((self,))

    def __repr__(self):
        return "(initial)"
class _StatementList(list):
    def __repr__(self):
        return "({})".format(" ".join(map(repr, self)))


class Statement:
    """Base class for all synthesizable statements; records its source location."""
    def __init__(self, *, src_loc_at=0):
        self.src_loc = tracer.get_src_loc(1 + src_loc_at)

    @staticmethod
    def cast(obj):
        # A single statement becomes a one-element list; iterables are flattened
        # recursively into one _StatementList.
        if isinstance(obj, Iterable):
            return _StatementList(list(chain.from_iterable(map(Statement.cast, obj))))
        if isinstance(obj, Statement):
            return _StatementList([obj])
        raise TypeError(f"Object {obj!r} is not an Amaranth statement")


@final
class Assign(Statement):
    """Assignment of ``rhs`` to ``lhs``, as produced by ``lhs.eq(rhs)``."""
    def __init__(self, lhs, rhs, *, src_loc_at=0):
        super().__init__(src_loc_at=src_loc_at)
        self.lhs = Value.cast(lhs)
        self.rhs = Value.cast(rhs)

    def _lhs_signals(self):
        return self.lhs._lhs_signals()

    def _rhs_signals(self):
        # The LHS may itself read signals (e.g. the index of a part-select).
        return self.lhs._rhs_signals() | self.rhs._rhs_signals()

    def __repr__(self):
        return f"(eq {self.lhs!r} {self.rhs!r})"


class UnusedProperty(UnusedMustUse):
    pass


class Property(Statement, MustUse):
    """Base class for formal properties (assert/assume/cover) over a test value."""
    _MustUse__warning = UnusedProperty

    def __init__(self, test, *, _check=None, _en=None, src_loc_at=0):
        super().__init__(src_loc_at=src_loc_at)
        self.test = Value.cast(test)
        self._check = _check
        self._en = _en
        # Backing signals are created lazily here unless the caller supplied them.
        if self._check is None:
            self._check = Signal(reset_less=True, name="${}$check".format(self._kind))
            self._check.src_loc = self.src_loc
        if self._en is None:
            self._en = Signal(reset_less=True, name="${}$en".format(self._kind))
            self._en.src_loc = self.src_loc

    def _lhs_signals(self):
        return SignalSet((self._en, self._check))

    def _rhs_signals(self):
        return self.test._rhs_signals()

    def __repr__(self):
        return "({} {!r})".format(self._kind, self.test)
{!r})".format(self._kind, self.test) + + +@final +class Assert(Property): + _kind = "assert" + + +@final +class Assume(Property): + _kind = "assume" + + +@final +class Cover(Property): + _kind = "cover" + + +# @final +class Switch(Statement): + def __init__(self, test, cases, *, src_loc=None, src_loc_at=0, case_src_locs={}): + if src_loc is None: + super().__init__(src_loc_at=src_loc_at) + else: + # Switch is a bit special in terms of location tracking because it is usually created + # long after the control has left the statement that directly caused its creation. + self.src_loc = src_loc + # Switch is also a bit special in that its parts also have location information. It can't + # be automatically traced, so whatever constructs a Switch may optionally provide it. + self.case_src_locs = {} + + self.test = Value.cast(test) + self.cases = OrderedDict() + for orig_keys, stmts in cases.items(): + # Map: None -> (); key -> (key,); (key...) -> (key...) + keys = orig_keys + if keys is None: + keys = () + if not isinstance(keys, tuple): + keys = (keys,) + # Map: 2 -> "0010"; "0010" -> "0010" + new_keys = () + key_mask = (1 << len(self.test)) - 1 + for key in keys: + if isinstance(key, str): + key = "".join(key.split()) # remove whitespace + elif isinstance(key, int): + key = format(key & key_mask, "b").rjust(len(self.test), "0") + elif isinstance(key, Enum): + key = format(key.value & key_mask, "b").rjust(len(self.test), "0") + else: + raise TypeError("Object {!r} cannot be used as a switch key" + .format(key)) + assert len(key) == len(self.test) + new_keys = (*new_keys, key) + if not isinstance(stmts, Iterable): + stmts = [stmts] + self.cases[new_keys] = Statement.cast(stmts) + if orig_keys in case_src_locs: + self.case_src_locs[new_keys] = case_src_locs[orig_keys] + + def _lhs_signals(self): + signals = union((s._lhs_signals() for ss in self.cases.values() for s in ss), + start=SignalSet()) + return signals + + def _rhs_signals(self): + signals = 
union((s._rhs_signals() for ss in self.cases.values() for s in ss), + start=SignalSet()) + return self.test._rhs_signals() | signals + + def __repr__(self): + def case_repr(keys, stmts): + stmts_repr = " ".join(map(repr, stmts)) + if keys == (): + return "(default {})".format(stmts_repr) + elif len(keys) == 1: + return "(case {} {})".format(keys[0], stmts_repr) + else: + return "(case ({}) {})".format(" ".join(keys), stmts_repr) + case_reprs = [case_repr(keys, stmts) for keys, stmts in self.cases.items()] + return "(switch {!r} {})".format(self.test, " ".join(case_reprs)) + + +class _MappedKeyCollection(metaclass=ABCMeta): + @abstractmethod + def _map_key(self, key): + pass # :nocov: + + @abstractmethod + def _unmap_key(self, key): + pass # :nocov: + + +class _MappedKeyDict(MutableMapping, _MappedKeyCollection): + def __init__(self, pairs=()): + self._storage = OrderedDict() + for key, value in pairs: + self[key] = value + + def __getitem__(self, key): + key = None if key is None else self._map_key(key) + return self._storage[key] + + def __setitem__(self, key, value): + key = None if key is None else self._map_key(key) + self._storage[key] = value + + def __delitem__(self, key): + key = None if key is None else self._map_key(key) + del self._storage[key] + + def __iter__(self): + for key in self._storage: + if key is None: + yield None + else: + yield self._unmap_key(key) + + def __eq__(self, other): + if not isinstance(other, type(self)): + return False + if len(self) != len(other): + return False + for ak, bk in zip(sorted(self._storage), sorted(other._storage)): + if ak != bk: + return False + if self._storage[ak] != other._storage[bk]: + return False + return True + + def __len__(self): + return len(self._storage) + + def __repr__(self): + pairs = ["({!r}, {!r})".format(k, v) for k, v in self.items()] + return "{}.{}([{}])".format(type(self).__module__, type(self).__name__, + ", ".join(pairs)) + + +class _MappedKeySet(MutableSet, _MappedKeyCollection): + def 
class ValueKey:
    """Hashable/comparable wrapper for structural identity of a ``Value``.

    Two keys are equal when they wrap structurally identical value expressions;
    used by ``ValueDict``/``ValueSet``.
    """
    def __init__(self, value):
        self.value = Value.cast(value)
        if isinstance(self.value, Const):
            self._hash = hash(self.value.value)
        elif isinstance(self.value, (Signal, AnyValue)):
            self._hash = hash(self.value.duid)
        elif isinstance(self.value, (ClockSignal, ResetSignal)):
            self._hash = hash(self.value.domain)
        elif isinstance(self.value, Operator):
            self._hash = hash((self.value.operator,
                               tuple(ValueKey(o) for o in self.value.operands)))
        elif isinstance(self.value, Slice):
            self._hash = hash((ValueKey(self.value.value), self.value.start, self.value.stop))
        elif isinstance(self.value, Part):
            self._hash = hash((ValueKey(self.value.value), ValueKey(self.value.offset),
                              self.value.width, self.value.stride))
        elif isinstance(self.value, Cat):
            self._hash = hash(tuple(ValueKey(o) for o in self.value.parts))
        elif isinstance(self.value, ArrayProxy):
            self._hash = hash((ValueKey(self.value.index),
                              tuple(ValueKey(e) for e in self.value._iter_as_values())))
        elif isinstance(self.value, Sample):
            self._hash = hash((ValueKey(self.value.value), self.value.clocks, self.value.domain))
        elif isinstance(self.value, Initial):
            self._hash = 0
        else: # :nocov:
            raise TypeError("Object {!r} cannot be used as a key in value collections"
                            .format(self.value))

    def __hash__(self):
        return self._hash

    def __eq__(self, other):
        if type(other) is not ValueKey:
            return False
        if type(self.value) is not type(other.value):
            return False

        if isinstance(self.value, Const):
            return self.value.value == other.value.value
        elif isinstance(self.value, (Signal, AnyValue)):
            return self.value is other.value
        elif isinstance(self.value, (ClockSignal, ResetSignal)):
            return self.value.domain == other.value.domain
        elif isinstance(self.value, Operator):
            return (self.value.operator == other.value.operator and
                    len(self.value.operands) == len(other.value.operands) and
                    all(ValueKey(a) == ValueKey(b)
                        for a, b in zip(self.value.operands, other.value.operands)))
        elif isinstance(self.value, Slice):
            return (ValueKey(self.value.value) == ValueKey(other.value.value) and
                    self.value.start == other.value.start and
                    self.value.stop == other.value.stop)
        elif isinstance(self.value, Part):
            return (ValueKey(self.value.value) == ValueKey(other.value.value) and
                        ValueKey(self.value.offset) == ValueKey(other.value.offset) and
                    self.value.width == other.value.width and
                    self.value.stride == other.value.stride)
        elif isinstance(self.value, Cat):
            # Fix: compare lengths before zipping — zip() truncates, which made
            # Cats with a common prefix but different lengths compare equal while
            # hashing differently.
            return (len(self.value.parts) == len(other.value.parts) and
                    all(ValueKey(a) == ValueKey(b)
                        for a, b in zip(self.value.parts, other.value.parts)))
        elif isinstance(self.value, ArrayProxy):
            return (ValueKey(self.value.index) == ValueKey(other.value.index) and
                    len(self.value.elems) == len(other.value.elems) and
                    all(ValueKey(a) == ValueKey(b)
                        for a, b in zip(self.value._iter_as_values(),
                                        other.value._iter_as_values())))
        elif isinstance(self.value, Sample):
            # Fix: the domain comparison previously compared self against itself
            # (`self.value.domain == self.value.domain`), which is always True.
            return (ValueKey(self.value.value) == ValueKey(other.value.value) and
                    self.value.clocks == other.value.clocks and
                    self.value.domain == other.value.domain)
        elif isinstance(self.value, Initial):
            return True
        else: # :nocov:
            raise TypeError("Object {!r} cannot be used as a key in value collections"
                            .format(self.value))

    def __lt__(self, other):
        if not isinstance(other, ValueKey):
            return False
        if type(self.value) != type(other.value):
            return False

        if isinstance(self.value, Const):
            return self.value < other.value
        elif isinstance(self.value, (Signal, AnyValue)):
            return self.value.duid < other.value.duid
        elif isinstance(self.value, Slice):
            # Fix: Slice exposes `start`/`stop`; the previous code read a
            # nonexistent `end` attribute and raised AttributeError.
            return (ValueKey(self.value.value) < ValueKey(other.value.value) and
                    self.value.start < other.value.start and
                    self.value.stop < other.value.stop)
        else: # :nocov:
            # Fix: the message previously lacked `.format(...)`, leaving a
            # literal `{!r}` placeholder in the exception text.
            raise TypeError("Object {!r} cannot be used as a key in value collections"
                            .format(self.value))

    def __repr__(self):
        return "<{}.ValueKey {!r}>".format(__name__, self.value)


class ValueDict(_MappedKeyDict):
    _map_key   = ValueKey
    _unmap_key = lambda self, key: key.value


class ValueSet(_MappedKeySet):
    _map_key   = ValueKey
    _unmap_key = lambda self, key: key.value


class SignalKey:
    """Hashable/orderable wrapper over Signal/ClockSignal/ResetSignal identity."""
    def __init__(self, signal):
        self.signal = signal
        if isinstance(signal, Signal):
            self._intern = (0, signal.duid)
        elif type(signal) is ClockSignal:
            self._intern = (1, signal.domain)
        elif type(signal) is ResetSignal:
            self._intern = (2, signal.domain)
        else:
            raise TypeError("Object {!r} is not an Amaranth signal".format(signal))

    def __hash__(self):
        return hash(self._intern)

    def __eq__(self, other):
        if type(other) is not SignalKey:
            return False
        return self._intern == other._intern

    def __lt__(self, other):
        if type(other) is not SignalKey:
            # Fix: the message previously formatted the undefined name `signal`,
            # raising NameError instead of the intended TypeError.
            raise TypeError("Object {!r} cannot be compared to a SignalKey".format(other))
        return self._intern < other._intern

    def __repr__(self):
        return "<{}.SignalKey {!r}>".format(__name__, self.signal)


class SignalDict(_MappedKeyDict):
    _map_key   = SignalKey
    _unmap_key = lambda self, key: key.signal


class SignalSet(_MappedKeySet):
    _map_key   = SignalKey
    _unmap_key = lambda self, key: key.signal
class DomainError(Exception):
    """Raised for invalid use of a clock domain."""
    pass


class ClockDomain:
    """Synchronous domain.

    Parameters
    ----------
    name : str or None
        Domain name. If ``None`` (the default) the name is inferred from the
        variable name this ``ClockDomain`` is assigned to (stripping any
        `"cd_"` prefix).
    reset_less : bool
        If ``True``, the domain does not use a reset signal. Registers within
        this domain are still all initialized to their reset state once, e.g.
        through Verilog `"initial"` statements.
    clk_edge : str
        The edge of the clock signal on which signals are sampled. Must be one
        of "pos" or "neg".
    async_reset : bool
        If ``True``, the domain uses an asynchronous reset, and registers within
        this domain are initialized to their reset state when reset level
        changes. Otherwise, registers are initialized to reset state at the next
        clock cycle when reset is asserted.
    local : bool
        If ``True``, the domain will propagate only downwards in the design
        hierarchy. Otherwise, the domain will propagate everywhere.

    Attributes
    ----------
    clk : Signal, inout
        The clock for this domain. Can be driven or used to drive other signals
        (preferably in combinatorial context).
    rst : Signal or None, inout
        Reset signal for this domain. Can be driven or used to drive.
    """

    @staticmethod
    def _name_for(domain_name, signal_name):
        # Signals of the default "sync" domain keep their bare names ("clk", "rst").
        if domain_name == "sync":
            return signal_name
        return "{}_{}".format(domain_name, signal_name)

    def __init__(self, name=None, *, clk_edge="pos", reset_less=False, async_reset=False,
                 local=False):
        if name is None:
            try:
                name = tracer.get_var_name()
            except tracer.NameNotFound:
                raise ValueError("Clock domain name must be specified explicitly")
            # NOTE(review): the "cd_" prefix strip is applied to inferred variable
            # names (e.g. `cd_pix = ClockDomain()`); confirm it is not intended
            # for explicitly passed names as well.
            if name.startswith("cd_"):
                name = name[3:]
        if name == "comb":
            raise ValueError("Domain '{}' may not be clocked".format(name))

        if clk_edge not in ("pos", "neg"):
            raise ValueError("Domain clock edge must be one of 'pos' or 'neg', not {!r}"
                             .format(clk_edge))

        self.name = name

        self.clk = Signal(name=self._name_for(name, "clk"), src_loc_at=1)
        self.clk_edge = clk_edge

        if reset_less:
            self.rst = None
        else:
            self.rst = Signal(name=self._name_for(name, "rst"), src_loc_at=1)

        self.async_reset = async_reset

        self.local = local

    def rename(self, new_name):
        """Rename the domain, updating the names of its clock and reset signals."""
        self.name = new_name
        self.clk.name = self._name_for(new_name, "clk")
        if self.rst is not None:
            self.rst.name = self._name_for(new_name, "rst")
import tracer +from .ast import * +from .ir import * +from .cd import * +from .xfrm import * + + +__all__ = ["SyntaxError", "SyntaxWarning", "Module"] + + +class SyntaxError(Exception): + pass + + +class SyntaxWarning(Warning): + pass + + +class _ModuleBuilderProxy: + def __init__(self, builder, depth): + object.__setattr__(self, "_builder", builder) + object.__setattr__(self, "_depth", depth) + + +class _ModuleBuilderDomain(_ModuleBuilderProxy): + def __init__(self, builder, depth, domain): + super().__init__(builder, depth) + self._domain = domain + + def __iadd__(self, assigns): + self._builder._add_statement(assigns, domain=self._domain, depth=self._depth) + return self + + +class _ModuleBuilderDomains(_ModuleBuilderProxy): + def __getattr__(self, name): + if name == "submodules": + warnings.warn("Using '.d.{}' would add statements to clock domain {!r}; " + "did you mean .{} instead?" + .format(name, name, name), + SyntaxWarning, stacklevel=2) + if name == "comb": + domain = None + else: + domain = name + return _ModuleBuilderDomain(self._builder, self._depth, domain) + + def __getitem__(self, name): + return self.__getattr__(name) + + def __setattr__(self, name, value): + if name == "_depth": + object.__setattr__(self, name, value) + elif not isinstance(value, _ModuleBuilderDomain): + raise AttributeError("Cannot assign 'd.{}' attribute; did you mean 'd.{} +='?" + .format(name, name)) + + def __setitem__(self, name, value): + return self.__setattr__(name, value) + + +class _ModuleBuilderRoot: + def __init__(self, builder, depth): + self._builder = builder + self.domain = self.d = _ModuleBuilderDomains(builder, depth) + + def __getattr__(self, name): + if name in ("comb", "sync"): + raise AttributeError("'{}' object has no attribute '{}'; did you mean 'd.{}'?" 
+ .format(type(self).__name__, name, name)) + raise AttributeError("'{}' object has no attribute '{}'" + .format(type(self).__name__, name)) + + +class _ModuleBuilderSubmodules: + def __init__(self, builder): + object.__setattr__(self, "_builder", builder) + + def __iadd__(self, modules): + for module in flatten([modules]): + self._builder._add_submodule(module) + return self + + def __setattr__(self, name, submodule): + self._builder._add_submodule(submodule, name) + + def __setitem__(self, name, value): + return self.__setattr__(name, value) + + def __getattr__(self, name): + return self._builder._get_submodule(name) + + def __getitem__(self, name): + return self.__getattr__(name) + + +class _ModuleBuilderDomainSet: + def __init__(self, builder): + object.__setattr__(self, "_builder", builder) + + def __iadd__(self, domains): + for domain in flatten([domains]): + if not isinstance(domain, ClockDomain): + raise TypeError("Only clock domains may be added to `m.domains`, not {!r}" + .format(domain)) + self._builder._add_domain(domain) + return self + + def __setattr__(self, name, domain): + if not isinstance(domain, ClockDomain): + raise TypeError("Only clock domains may be added to `m.domains`, not {!r}" + .format(domain)) + if domain.name != name: + raise NameError("Clock domain name {!r} must match name in `m.domains.{} += ...` " + "syntax" + .format(domain.name, name)) + self._builder._add_domain(domain) + + +# It's not particularly clean to depend on an internal interface, but, unfortunately, __bool__ +# must be defined on a class to be called during implicit conversion. 
+class _GuardedContextManager(_GeneratorContextManager): + def __init__(self, keyword, func, args, kwds): + self.keyword = keyword + return super().__init__(func, args, kwds) + + def __bool__(self): + raise SyntaxError("`if m.{kw}(...):` does not work; use `with m.{kw}(...)`" + .format(kw=self.keyword)) + + +def _guardedcontextmanager(keyword): + def decorator(func): + @wraps(func) + def helper(*args, **kwds): + return _GuardedContextManager(keyword, func, args, kwds) + return helper + return decorator + + +class FSM: + def __init__(self, state, encoding, decoding): + self.state = state + self.encoding = encoding + self.decoding = decoding + + def ongoing(self, name): + if name not in self.encoding: + self.encoding[name] = len(self.encoding) + return Operator("==", [self.state, self.encoding[name]], src_loc_at=0) + + +class Module(_ModuleBuilderRoot, Elaboratable): + @classmethod + def __init_subclass__(cls): + raise SyntaxError("Instead of inheriting from `Module`, inherit from `Elaboratable` " + "and return a `Module` from the `elaborate(self, platform)` method") + + def __init__(self): + _ModuleBuilderRoot.__init__(self, self, depth=0) + self.submodules = _ModuleBuilderSubmodules(self) + self.domains = _ModuleBuilderDomainSet(self) + + self._statements = Statement.cast([]) + self._ctrl_context = None + self._ctrl_stack = [] + + self._driving = SignalDict() + self._named_submodules = {} + self._anon_submodules = [] + self._domains = {} + self._generated = {} + + def _check_context(self, construct, context): + if self._ctrl_context != context: + if self._ctrl_context is None: + raise SyntaxError("{} is not permitted outside of {}" + .format(construct, context)) + else: + if self._ctrl_context == "Switch": + secondary_context = "Case" + if self._ctrl_context == "FSM": + secondary_context = "State" + raise SyntaxError("{} is not permitted directly inside of {}; it is permitted " + "inside of {} {}" + .format(construct, self._ctrl_context, + self._ctrl_context, 
secondary_context)) + + def _get_ctrl(self, name): + if self._ctrl_stack: + top_name, top_data = self._ctrl_stack[-1] + if top_name == name: + return top_data + + def _flush_ctrl(self): + while len(self._ctrl_stack) > self.domain._depth: + self._pop_ctrl() + + def _set_ctrl(self, name, data): + self._flush_ctrl() + self._ctrl_stack.append((name, data)) + return data + + def _check_signed_cond(self, cond): + cond = Value.cast(cond) + width, signed = cond.shape() + if signed: + warnings.warn("Signed values in If/Elif conditions usually result from inverting " + "Python booleans with ~, which leads to unexpected results. " + "Replace `~flag` with `not flag`. (If this is a false positive, " + "silence this warning with `m.If(x)` → `m.If(x.bool())`.)", + SyntaxWarning, stacklevel=4) + return cond + + @_guardedcontextmanager("If") + def If(self, cond): + self._check_context("If", context=None) + cond = self._check_signed_cond(cond) + src_loc = tracer.get_src_loc(src_loc_at=1) + if_data = self._set_ctrl("If", { + "depth": self.domain._depth, + "tests": [], + "bodies": [], + "src_loc": src_loc, + "src_locs": [], + }) + try: + _outer_case, self._statements = self._statements, [] + self.domain._depth += 1 + yield + self._flush_ctrl() + if_data["tests"].append(cond) + if_data["bodies"].append(self._statements) + if_data["src_locs"].append(src_loc) + finally: + self.domain._depth -= 1 + self._statements = _outer_case + + @_guardedcontextmanager("Elif") + def Elif(self, cond): + self._check_context("Elif", context=None) + cond = self._check_signed_cond(cond) + src_loc = tracer.get_src_loc(src_loc_at=1) + if_data = self._get_ctrl("If") + if if_data is None or if_data["depth"] != self.domain._depth: + raise SyntaxError("Elif without preceding If") + try: + _outer_case, self._statements = self._statements, [] + self.domain._depth += 1 + yield + self._flush_ctrl() + if_data["tests"].append(cond) + if_data["bodies"].append(self._statements) + if_data["src_locs"].append(src_loc) + 
finally: + self.domain._depth -= 1 + self._statements = _outer_case + + @_guardedcontextmanager("Else") + def Else(self): + self._check_context("Else", context=None) + src_loc = tracer.get_src_loc(src_loc_at=1) + if_data = self._get_ctrl("If") + if if_data is None or if_data["depth"] != self.domain._depth: + raise SyntaxError("Else without preceding If/Elif") + try: + _outer_case, self._statements = self._statements, [] + self.domain._depth += 1 + yield + self._flush_ctrl() + if_data["bodies"].append(self._statements) + if_data["src_locs"].append(src_loc) + finally: + self.domain._depth -= 1 + self._statements = _outer_case + self._pop_ctrl() + + @contextmanager + def Switch(self, test): + self._check_context("Switch", context=None) + switch_data = self._set_ctrl("Switch", { + "test": Value.cast(test), + "cases": OrderedDict(), + "src_loc": tracer.get_src_loc(src_loc_at=1), + "case_src_locs": {}, + }) + try: + self._ctrl_context = "Switch" + self.domain._depth += 1 + yield + finally: + self.domain._depth -= 1 + self._ctrl_context = None + self._pop_ctrl() + + @contextmanager + def Case(self, *patterns): + self._check_context("Case", context="Switch") + src_loc = tracer.get_src_loc(src_loc_at=1) + switch_data = self._get_ctrl("Switch") + new_patterns = () + for pattern in patterns: + if not isinstance(pattern, (int, str, Enum)): + raise SyntaxError("Case pattern must be an integer, a string, or an enumeration, " + "not {!r}" + .format(pattern)) + if isinstance(pattern, str) and any(bit not in "01- \t" for bit in pattern): + raise SyntaxError("Case pattern '{}' must consist of 0, 1, and - (don't care) " + "bits, and may include whitespace" + .format(pattern)) + if (isinstance(pattern, str) and + len("".join(pattern.split())) != len(switch_data["test"])): + raise SyntaxError("Case pattern '{}' must have the same width as switch value " + "(which is {})" + .format(pattern, len(switch_data["test"]))) + if isinstance(pattern, int) and bits_for(pattern) > 
len(switch_data["test"]): + warnings.warn("Case pattern '{:b}' is wider than switch value " + "(which has width {}); comparison will never be true" + .format(pattern, len(switch_data["test"])), + SyntaxWarning, stacklevel=3) + continue + if isinstance(pattern, Enum) and bits_for(pattern.value) > len(switch_data["test"]): + warnings.warn("Case pattern '{:b}' ({}.{}) is wider than switch value " + "(which has width {}); comparison will never be true" + .format(pattern.value, pattern.__class__.__name__, pattern.name, + len(switch_data["test"])), + SyntaxWarning, stacklevel=3) + continue + new_patterns = (*new_patterns, pattern) + try: + _outer_case, self._statements = self._statements, [] + self._ctrl_context = None + yield + self._flush_ctrl() + # If none of the provided cases can possibly be true, omit this branch completely. + # This needs to be differentiated from no cases being provided in the first place, + # which means the branch will always match. + if not (patterns and not new_patterns): + switch_data["cases"][new_patterns] = self._statements + switch_data["case_src_locs"][new_patterns] = src_loc + finally: + self._ctrl_context = "Switch" + self._statements = _outer_case + + def Default(self): + return self.Case() + + @contextmanager + def FSM(self, reset=None, domain="sync", name="fsm"): + self._check_context("FSM", context=None) + if domain == "comb": + raise ValueError("FSM may not be driven by the '{}' domain".format(domain)) + fsm_data = self._set_ctrl("FSM", { + "name": name, + "signal": Signal(name="{}_state".format(name), src_loc_at=2), + "reset": reset, + "domain": domain, + "encoding": OrderedDict(), + "decoding": OrderedDict(), + "states": OrderedDict(), + "src_loc": tracer.get_src_loc(src_loc_at=1), + "state_src_locs": {}, + }) + self._generated[name] = fsm = \ + FSM(fsm_data["signal"], fsm_data["encoding"], fsm_data["decoding"]) + try: + self._ctrl_context = "FSM" + self.domain._depth += 1 + yield fsm + for state_name in fsm_data["encoding"]: + 
if state_name not in fsm_data["states"]: + raise NameError("FSM state '{}' is referenced but not defined" + .format(state_name)) + finally: + self.domain._depth -= 1 + self._ctrl_context = None + self._pop_ctrl() + + @contextmanager + def State(self, name): + self._check_context("FSM State", context="FSM") + src_loc = tracer.get_src_loc(src_loc_at=1) + fsm_data = self._get_ctrl("FSM") + if name in fsm_data["states"]: + raise NameError("FSM state '{}' is already defined".format(name)) + if name not in fsm_data["encoding"]: + fsm_data["encoding"][name] = len(fsm_data["encoding"]) + try: + _outer_case, self._statements = self._statements, [] + self._ctrl_context = None + yield + self._flush_ctrl() + fsm_data["states"][name] = self._statements + fsm_data["state_src_locs"][name] = src_loc + finally: + self._ctrl_context = "FSM" + self._statements = _outer_case + + @property + def next(self): + raise SyntaxError("Only assignment to `m.next` is permitted") + + @next.setter + def next(self, name): + if self._ctrl_context != "FSM": + for level, (ctrl_name, ctrl_data) in enumerate(reversed(self._ctrl_stack)): + if ctrl_name == "FSM": + if name not in ctrl_data["encoding"]: + ctrl_data["encoding"][name] = len(ctrl_data["encoding"]) + self._add_statement( + assigns=[ctrl_data["signal"].eq(ctrl_data["encoding"][name])], + domain=ctrl_data["domain"], + depth=len(self._ctrl_stack)) + return + + raise SyntaxError("`m.next = <...>` is only permitted inside an FSM state") + + def _pop_ctrl(self): + name, data = self._ctrl_stack.pop() + src_loc = data["src_loc"] + + if name == "If": + if_tests, if_bodies = data["tests"], data["bodies"] + if_src_locs = data["src_locs"] + + tests, cases = [], OrderedDict() + for if_test, if_case in zip(if_tests + [None], if_bodies): + if if_test is not None: + if len(if_test) != 1: + if_test = if_test.bool() + tests.append(if_test) + + if if_test is not None: + match = ("1" + "-" * (len(tests) - 1)).rjust(len(if_tests), "-") + else: + match = None + 
cases[match] = if_case + + self._statements.append(Switch(Cat(tests), cases, + src_loc=src_loc, case_src_locs=dict(zip(cases, if_src_locs)))) + + if name == "Switch": + switch_test, switch_cases = data["test"], data["cases"] + switch_case_src_locs = data["case_src_locs"] + + self._statements.append(Switch(switch_test, switch_cases, + src_loc=src_loc, case_src_locs=switch_case_src_locs)) + + if name == "FSM": + fsm_signal, fsm_reset, fsm_encoding, fsm_decoding, fsm_states = \ + data["signal"], data["reset"], data["encoding"], data["decoding"], data["states"] + fsm_state_src_locs = data["state_src_locs"] + if not fsm_states: + return + fsm_signal.width = bits_for(len(fsm_encoding) - 1) + if fsm_reset is None: + fsm_signal.reset = fsm_encoding[next(iter(fsm_states))] + else: + fsm_signal.reset = fsm_encoding[fsm_reset] + # The FSM is encoded such that the state with encoding 0 is always the reset state. + fsm_decoding.update((n, s) for s, n in fsm_encoding.items()) + fsm_signal.decoder = lambda n: "{}/{}".format(fsm_decoding[n], n) + self._statements.append(Switch(fsm_signal, + OrderedDict((fsm_encoding[name], stmts) for name, stmts in fsm_states.items()), + src_loc=src_loc, case_src_locs={fsm_encoding[name]: fsm_state_src_locs[name] + for name in fsm_states})) + + def _add_statement(self, assigns, domain, depth, compat_mode=False): + def domain_name(domain): + if domain is None: + return "comb" + else: + return domain + + while len(self._ctrl_stack) > self.domain._depth: + self._pop_ctrl() + + for stmt in Statement.cast(assigns): + if not compat_mode and not isinstance(stmt, (Assign, Assert, Assume, Cover)): + raise SyntaxError( + "Only assignments and property checks may be appended to d.{}" + .format(domain_name(domain))) + + stmt._MustUse__used = True + stmt = SampleDomainInjector(domain)(stmt) + + for signal in stmt._lhs_signals(): + if signal not in self._driving: + self._driving[signal] = domain + elif self._driving[signal] != domain: + cd_curr = 
self._driving[signal] + raise SyntaxError( + "Driver-driver conflict: trying to drive {!r} from d.{}, but it is " + "already driven from d.{}" + .format(signal, domain_name(domain), domain_name(cd_curr))) + + self._statements.append(stmt) + + def _add_submodule(self, submodule, name=None): + if not hasattr(submodule, "elaborate"): + raise TypeError("Trying to add {!r}, which does not implement .elaborate(), as " + "a submodule".format(submodule)) + if name == None: + self._anon_submodules.append(submodule) + else: + if name in self._named_submodules: + raise NameError("Submodule named '{}' already exists".format(name)) + self._named_submodules[name] = submodule + + def _get_submodule(self, name): + if name in self._named_submodules: + return self._named_submodules[name] + else: + raise AttributeError("No submodule named '{}' exists".format(name)) + + def _add_domain(self, cd): + if cd.name in self._domains: + raise NameError("Clock domain named '{}' already exists".format(cd.name)) + self._domains[cd.name] = cd + + def _flush(self): + while self._ctrl_stack: + self._pop_ctrl() + + def elaborate(self, platform): + self._flush() + + fragment = Fragment() + for name in self._named_submodules: + fragment.add_subfragment(Fragment.get(self._named_submodules[name], platform), name) + for submodule in self._anon_submodules: + fragment.add_subfragment(Fragment.get(submodule, platform), None) + statements = SampleDomainInjector("sync")(self._statements) + fragment.add_statements(statements) + for signal, domain in self._driving.items(): + fragment.add_driver(signal, domain) + fragment.add_domains(self._domains.values()) + fragment.generated.update(self._generated) + return fragment diff --git a/amaranth/hdl/ir.py b/amaranth/hdl/ir.py new file mode 100644 index 0000000..3c1ec60 --- /dev/null +++ b/amaranth/hdl/ir.py @@ -0,0 +1,592 @@ +from abc import ABCMeta +from collections import defaultdict, OrderedDict +from functools import reduce +import warnings + +from .._utils 
import * +from .._unused import * +from .ast import * +from .cd import * + + +__all__ = ["UnusedElaboratable", "Elaboratable", "DriverConflict", "Fragment", "Instance"] + + +class UnusedElaboratable(UnusedMustUse): + pass + + +class Elaboratable(MustUse, metaclass=ABCMeta): + _MustUse__warning = UnusedElaboratable + + +class DriverConflict(UserWarning): + pass + + +class Fragment: + @staticmethod + def get(obj, platform): + code = None + while True: + if isinstance(obj, Fragment): + return obj + elif isinstance(obj, Elaboratable): + code = obj.elaborate.__code__ + obj._MustUse__used = True + obj = obj.elaborate(platform) + elif hasattr(obj, "elaborate"): + warnings.warn( + message="Class {!r} is an elaboratable that does not explicitly inherit from " + "Elaboratable; doing so would improve diagnostics" + .format(type(obj)), + category=RuntimeWarning, + stacklevel=2) + code = obj.elaborate.__code__ + obj = obj.elaborate(platform) + else: + raise AttributeError("Object {!r} cannot be elaborated".format(obj)) + if obj is None and code is not None: + warnings.warn_explicit( + message=".elaborate() returned None; missing return statement?", + category=UserWarning, + filename=code.co_filename, + lineno=code.co_firstlineno) + + def __init__(self): + self.ports = SignalDict() + self.drivers = OrderedDict() + self.statements = [] + self.domains = OrderedDict() + self.subfragments = [] + self.attrs = OrderedDict() + self.generated = OrderedDict() + self.flatten = False + + def add_ports(self, *ports, dir): + assert dir in ("i", "o", "io") + for port in flatten(ports): + self.ports[port] = dir + + def iter_ports(self, dir=None): + if dir is None: + yield from self.ports + else: + for port, port_dir in self.ports.items(): + if port_dir == dir: + yield port + + def add_driver(self, signal, domain=None): + if domain not in self.drivers: + self.drivers[domain] = SignalSet() + self.drivers[domain].add(signal) + + def iter_drivers(self): + for domain, signals in 
self.drivers.items(): + for signal in signals: + yield domain, signal + + def iter_comb(self): + if None in self.drivers: + yield from self.drivers[None] + + def iter_sync(self): + for domain, signals in self.drivers.items(): + if domain is None: + continue + for signal in signals: + yield domain, signal + + def iter_signals(self): + signals = SignalSet() + signals |= self.ports.keys() + for domain, domain_signals in self.drivers.items(): + if domain is not None: + cd = self.domains[domain] + signals.add(cd.clk) + if cd.rst is not None: + signals.add(cd.rst) + signals |= domain_signals + return signals + + def add_domains(self, *domains): + for domain in flatten(domains): + assert isinstance(domain, ClockDomain) + assert domain.name not in self.domains + self.domains[domain.name] = domain + + def iter_domains(self): + yield from self.domains + + def add_statements(self, *stmts): + for stmt in Statement.cast(stmts): + stmt._MustUse__used = True + self.statements.append(stmt) + + def add_subfragment(self, subfragment, name=None): + assert isinstance(subfragment, Fragment) + self.subfragments.append((subfragment, name)) + + def find_subfragment(self, name_or_index): + if isinstance(name_or_index, int): + if name_or_index < len(self.subfragments): + subfragment, name = self.subfragments[name_or_index] + return subfragment + raise NameError("No subfragment at index #{}".format(name_or_index)) + else: + for subfragment, name in self.subfragments: + if name == name_or_index: + return subfragment + raise NameError("No subfragment with name '{}'".format(name_or_index)) + + def find_generated(self, *path): + if len(path) > 1: + path_component, *path = path + return self.find_subfragment(path_component).find_generated(*path) + else: + item, = path + return self.generated[item] + + def elaborate(self, platform): + return self + + def _merge_subfragment(self, subfragment): + # Merge subfragment's everything except clock domains into this fragment. 
+ # Flattening is done after clock domain propagation, so we can assume the domains + # are already the same in every involved fragment in the first place. + self.ports.update(subfragment.ports) + for domain, signal in subfragment.iter_drivers(): + self.add_driver(signal, domain) + self.statements += subfragment.statements + self.subfragments += subfragment.subfragments + + # Remove the merged subfragment. + found = False + for i, (check_subfrag, check_name) in enumerate(self.subfragments): # :nobr: + if subfragment == check_subfrag: + del self.subfragments[i] + found = True + break + assert found + + def _resolve_hierarchy_conflicts(self, hierarchy=("top",), mode="warn"): + assert mode in ("silent", "warn", "error") + + driver_subfrags = SignalDict() + memory_subfrags = OrderedDict() + def add_subfrag(registry, entity, entry): + # Because of missing domain insertion, at the point when this code runs, we have + # a mixture of bound and unbound {Clock,Reset}Signals. Map the bound ones to + # the actual signals (because the signal itself can be driven as well); but leave + # the unbound ones as it is, because there's no concrete signal for it yet anyway. + if isinstance(entity, ClockSignal) and entity.domain in self.domains: + entity = self.domains[entity.domain].clk + elif isinstance(entity, ResetSignal) and entity.domain in self.domains: + entity = self.domains[entity.domain].rst + + if entity not in registry: + registry[entity] = set() + registry[entity].add(entry) + + # For each signal driven by this fragment and/or its subfragments, determine which + # subfragments also drive it. + for domain, signal in self.iter_drivers(): + add_subfrag(driver_subfrags, signal, (None, hierarchy)) + + flatten_subfrags = set() + for i, (subfrag, name) in enumerate(self.subfragments): + if name is None: + name = "".format(i) + subfrag_hierarchy = hierarchy + (name,) + + if subfrag.flatten: + # Always flatten subfragments that explicitly request it. 
+ flatten_subfrags.add((subfrag, subfrag_hierarchy)) + + if isinstance(subfrag, Instance): + # For memories (which are subfragments, but semantically a part of superfragment), + # record that this fragment is driving it. + if subfrag.type in ("$memrd", "$memwr"): + memory = subfrag.parameters["MEMID"] + add_subfrag(memory_subfrags, memory, (None, hierarchy)) + + # Never flatten instances. + continue + + # First, recurse into subfragments and let them detect driver conflicts as well. + subfrag_drivers, subfrag_memories = \ + subfrag._resolve_hierarchy_conflicts(subfrag_hierarchy, mode) + + # Second, classify subfragments by signals they drive and memories they use. + for signal in subfrag_drivers: + add_subfrag(driver_subfrags, signal, (subfrag, subfrag_hierarchy)) + for memory in subfrag_memories: + add_subfrag(memory_subfrags, memory, (subfrag, subfrag_hierarchy)) + + # Find out the set of subfragments that needs to be flattened into this fragment + # to resolve driver-driver conflicts. + def flatten_subfrags_if_needed(subfrags): + if len(subfrags) == 1: + return [] + flatten_subfrags.update((f, h) for f, h in subfrags if f is not None) + return list(sorted(".".join(h) for f, h in subfrags)) + + for signal, subfrags in driver_subfrags.items(): + subfrag_names = flatten_subfrags_if_needed(subfrags) + if not subfrag_names: + continue + + # While we're at it, show a message. + message = ("Signal '{}' is driven from multiple fragments: {}" + .format(signal, ", ".join(subfrag_names))) + if mode == "error": + raise DriverConflict(message) + elif mode == "warn": + message += "; hierarchy will be flattened" + warnings.warn_explicit(message, DriverConflict, *signal.src_loc) + + for memory, subfrags in memory_subfrags.items(): + subfrag_names = flatten_subfrags_if_needed(subfrags) + if not subfrag_names: + continue + + # While we're at it, show a message. 
+ message = ("Memory '{}' is accessed from multiple fragments: {}" + .format(memory.name, ", ".join(subfrag_names))) + if mode == "error": + raise DriverConflict(message) + elif mode == "warn": + message += "; hierarchy will be flattened" + warnings.warn_explicit(message, DriverConflict, *memory.src_loc) + + # Flatten hierarchy. + for subfrag, subfrag_hierarchy in sorted(flatten_subfrags, key=lambda x: x[1]): + self._merge_subfragment(subfrag) + + # If we flattened anything, we might be in a situation where we have a driver conflict + # again, e.g. if we had a tree of fragments like A --- B --- C where only fragments + # A and C were driving a signal S. In that case, since B is not driving S itself, + # processing B will not result in any flattening, but since B is transitively driving S, + # processing A will flatten B into it. Afterwards, we have a tree like AB --- C, which + # has another conflict. + if any(flatten_subfrags): + # Try flattening again. + return self._resolve_hierarchy_conflicts(hierarchy, mode) + + # Nothing was flattened, we're done! + return (SignalSet(driver_subfrags.keys()), + set(memory_subfrags.keys())) + + def _propagate_domains_up(self, hierarchy=("top",)): + from .xfrm import DomainRenamer + + domain_subfrags = defaultdict(lambda: set()) + + # For each domain defined by a subfragment, determine which subfragments define it. + for i, (subfrag, name) in enumerate(self.subfragments): + # First, recurse into subfragments and let them propagate domains up as well. + hier_name = name + if hier_name is None: + hier_name = "".format(i) + subfrag._propagate_domains_up(hierarchy + (hier_name,)) + + # Second, classify subfragments by domains they define. + for domain_name, domain in subfrag.domains.items(): + if domain.local: + continue + domain_subfrags[domain_name].add((subfrag, name, i)) + + # For each domain defined by more than one subfragment, rename the domain in each + # of the subfragments such that they no longer conflict. 
+ for domain_name, subfrags in domain_subfrags.items(): + if len(subfrags) == 1: + continue + + names = [n for f, n, i in subfrags] + if not all(names): + names = sorted("".format(i) if n is None else "'{}'".format(n) + for f, n, i in subfrags) + raise DomainError("Domain '{}' is defined by subfragments {} of fragment '{}'; " + "it is necessary to either rename subfragment domains " + "explicitly, or give names to subfragments" + .format(domain_name, ", ".join(names), ".".join(hierarchy))) + + if len(names) != len(set(names)): + names = sorted("#{}".format(i) for f, n, i in subfrags) + raise DomainError("Domain '{}' is defined by subfragments {} of fragment '{}', " + "some of which have identical names; it is necessary to either " + "rename subfragment domains explicitly, or give distinct names " + "to subfragments" + .format(domain_name, ", ".join(names), ".".join(hierarchy))) + + for subfrag, name, i in subfrags: + domain_name_map = {domain_name: "{}_{}".format(name, domain_name)} + self.subfragments[i] = (DomainRenamer(domain_name_map)(subfrag), name) + + # Finally, collect the (now unique) subfragment domains, and merge them into our domains. + for subfrag, name in self.subfragments: + for domain_name, domain in subfrag.domains.items(): + if domain.local: + continue + self.add_domains(domain) + + def _propagate_domains_down(self): + # For each domain defined in this fragment, ensure it also exists in all subfragments. 
+ for subfrag, name in self.subfragments: + for domain in self.iter_domains(): + if domain in subfrag.domains: + assert self.domains[domain] is subfrag.domains[domain] + else: + subfrag.add_domains(self.domains[domain]) + + subfrag._propagate_domains_down() + + def _create_missing_domains(self, missing_domain, *, platform=None): + from .xfrm import DomainCollector + + collector = DomainCollector() + collector(self) + + new_domains = [] + for domain_name in collector.used_domains - collector.defined_domains: + if domain_name is None: + continue + value = missing_domain(domain_name) + if value is None: + raise DomainError("Domain '{}' is used but not defined".format(domain_name)) + if type(value) is ClockDomain: + self.add_domains(value) + # And expose ports on the newly added clock domain, since it is added directly + # and there was no chance to add any logic driving it. + new_domains.append(value) + else: + new_fragment = Fragment.get(value, platform=platform) + if domain_name not in new_fragment.domains: + defined = new_fragment.domains.keys() + raise DomainError( + "Fragment returned by missing domain callback does not define " + "requested domain '{}' (defines {})." 
+ .format(domain_name, ", ".join("'{}'".format(n) for n in defined))) + self.add_subfragment(new_fragment, "cd_{}".format(domain_name)) + self.add_domains(new_fragment.domains.values()) + return new_domains + + def _propagate_domains(self, missing_domain, *, platform=None): + self._propagate_domains_up() + self._propagate_domains_down() + self._resolve_hierarchy_conflicts() + new_domains = self._create_missing_domains(missing_domain, platform=platform) + self._propagate_domains_down() + return new_domains + + def _prepare_use_def_graph(self, parent, level, uses, defs, ios, top): + def add_uses(*sigs, self=self): + for sig in flatten(sigs): + if sig not in uses: + uses[sig] = set() + uses[sig].add(self) + + def add_defs(*sigs): + for sig in flatten(sigs): + if sig not in defs: + defs[sig] = self + else: + assert defs[sig] is self + + def add_io(*sigs): + for sig in flatten(sigs): + if sig not in ios: + ios[sig] = self + else: + assert ios[sig] is self + + # Collect all signals we're driving (on LHS of statements), and signals we're using + # (on RHS of statements, or in clock domains). + for stmt in self.statements: + add_uses(stmt._rhs_signals()) + add_defs(stmt._lhs_signals()) + + for domain, _ in self.iter_sync(): + cd = self.domains[domain] + add_uses(cd.clk) + if cd.rst is not None: + add_uses(cd.rst) + + # Repeat for subfragments. + for subfrag, name in self.subfragments: + if isinstance(subfrag, Instance): + for port_name, (value, dir) in subfrag.named_ports.items(): + if dir == "i": + # Prioritize defs over uses. 
+ rhs_without_outputs = value._rhs_signals() - subfrag.iter_ports(dir="o") + subfrag.add_ports(rhs_without_outputs, dir=dir) + add_uses(value._rhs_signals()) + if dir == "o": + subfrag.add_ports(value._lhs_signals(), dir=dir) + add_defs(value._lhs_signals()) + if dir == "io": + subfrag.add_ports(value._lhs_signals(), dir=dir) + add_io(value._lhs_signals()) + else: + parent[subfrag] = self + level [subfrag] = level[self] + 1 + + subfrag._prepare_use_def_graph(parent, level, uses, defs, ios, top) + + def _propagate_ports(self, ports, all_undef_as_ports): + # Take this fragment graph: + # + # __ B (def: q, use: p r) + # / + # A (def: p, use: q r) + # \ + # \_ C (def: r, use: p q) + # + # We need to consider three cases. + # 1. Signal p requires an input port in B; + # 2. Signal r requires an output port in C; + # 3. Signal r requires an output port in C and an input port in B. + # + # Adding these ports can be in general done in three steps for each signal: + # 1. Find the least common ancestor of all uses and defs. + # 2. Going upwards from the single def, add output ports. + # 3. Going upwards from all uses, add input ports. + + parent = {self: None} + level = {self: 0} + uses = SignalDict() + defs = SignalDict() + ios = SignalDict() + self._prepare_use_def_graph(parent, level, uses, defs, ios, self) + + ports = SignalSet(ports) + if all_undef_as_ports: + for sig in uses: + if sig in defs: + continue + ports.add(sig) + for sig in ports: + if sig not in uses: + uses[sig] = set() + uses[sig].add(self) + + @memoize + def lca_of(fragu, fragv): + # Normalize fragu to be deeper than fragv. + if level[fragu] < level[fragv]: + fragu, fragv = fragv, fragu + # Find ancestor of fragu on the same level as fragv. + for _ in range(level[fragu] - level[fragv]): + fragu = parent[fragu] + # If fragv was the ancestor of fragv, we're done. + if fragu == fragv: + return fragu + # Otherwise, they are at the same level but in different branches. 
Step both fragu + # and fragv until we find the common ancestor. + while parent[fragu] != parent[fragv]: + fragu = parent[fragu] + fragv = parent[fragv] + return parent[fragu] + + for sig in uses: + if sig in defs: + lca = reduce(lca_of, uses[sig], defs[sig]) + else: + lca = reduce(lca_of, uses[sig]) + + for frag in uses[sig]: + if sig in defs and frag is defs[sig]: + continue + while frag != lca: + frag.add_ports(sig, dir="i") + frag = parent[frag] + + if sig in defs: + frag = defs[sig] + while frag != lca: + frag.add_ports(sig, dir="o") + frag = parent[frag] + + for sig in ios: + frag = ios[sig] + while frag is not None: + frag.add_ports(sig, dir="io") + frag = parent[frag] + + for sig in ports: + if sig in ios: + continue + if sig in defs: + self.add_ports(sig, dir="o") + else: + self.add_ports(sig, dir="i") + + def prepare(self, ports=None, missing_domain=lambda name: ClockDomain(name)): + from .xfrm import SampleLowerer, DomainLowerer + + fragment = SampleLowerer()(self) + new_domains = fragment._propagate_domains(missing_domain) + fragment = DomainLowerer()(fragment) + if ports is None: + fragment._propagate_ports(ports=(), all_undef_as_ports=True) + else: + if not isinstance(ports, tuple) and not isinstance(ports, list): + msg = "`ports` must be either a list or a tuple, not {!r}"\ + .format(ports) + if isinstance(ports, Value): + msg += " (did you mean `ports=(,)`, rather than `ports=`?)" + raise TypeError(msg) + mapped_ports = [] + # Lower late bound signals like ClockSignal() to ports. + port_lowerer = DomainLowerer(fragment.domains) + for port in ports: + if not isinstance(port, (Signal, ClockSignal, ResetSignal)): + raise TypeError("Only signals may be added as ports, not {!r}" + .format(port)) + mapped_ports.append(port_lowerer.on_value(port)) + # Add ports for all newly created missing clock domains, since not doing so defeats + # the purpose of domain auto-creation. 
(It's possible to refer to these ports before + # the domain actually exists through late binding, but it's inconvenient.) + for cd in new_domains: + mapped_ports.append(cd.clk) + if cd.rst is not None: + mapped_ports.append(cd.rst) + fragment._propagate_ports(ports=mapped_ports, all_undef_as_ports=False) + return fragment + + +class Instance(Fragment): + def __init__(self, type, *args, **kwargs): + super().__init__() + + self.type = type + self.parameters = OrderedDict() + self.named_ports = OrderedDict() + + for (kind, name, value) in args: + if kind == "a": + self.attrs[name] = value + elif kind == "p": + self.parameters[name] = value + elif kind in ("i", "o", "io"): + self.named_ports[name] = (Value.cast(value), kind) + else: + raise NameError("Instance argument {!r} should be a tuple (kind, name, value) " + "where kind is one of \"a\", \"p\", \"i\", \"o\", or \"io\"" + .format((kind, name, value))) + + for kw, arg in kwargs.items(): + if kw.startswith("a_"): + self.attrs[kw[2:]] = arg + elif kw.startswith("p_"): + self.parameters[kw[2:]] = arg + elif kw.startswith("i_"): + self.named_ports[kw[2:]] = (Value.cast(arg), "i") + elif kw.startswith("o_"): + self.named_ports[kw[2:]] = (Value.cast(arg), "o") + elif kw.startswith("io_"): + self.named_ports[kw[3:]] = (Value.cast(arg), "io") + else: + raise NameError("Instance keyword argument {}={!r} does not start with one of " + "\"a_\", \"p_\", \"i_\", \"o_\", or \"io_\"" + .format(kw, arg)) diff --git a/amaranth/hdl/mem.py b/amaranth/hdl/mem.py new file mode 100644 index 0000000..fd7c874 --- /dev/null +++ b/amaranth/hdl/mem.py @@ -0,0 +1,322 @@ +import operator +from collections import OrderedDict + +from .. import tracer +from .ast import * +from .ir import Elaboratable, Instance + + +__all__ = ["Memory", "ReadPort", "WritePort", "DummyPort"] + + +class Memory: + """A word addressable storage. + + Parameters + ---------- + width : int + Access granularity. 
Each storage element of this memory is ``width`` bits in size. + depth : int + Word count. This memory contains ``depth`` storage elements. + init : list of int + Initial values. At power on, each storage element in this memory is initialized to + the corresponding element of ``init``, if any, or to zero otherwise. + Uninitialized memories are not currently supported. + name : str + Name hint for this memory. If ``None`` (default) the name is inferred from the variable + name this ``Memory`` is assigned to. + attrs : dict + Dictionary of synthesis attributes. + + Attributes + ---------- + width : int + depth : int + init : list of int + attrs : dict + """ + def __init__(self, *, width, depth, init=None, name=None, attrs=None, simulate=True): + if not isinstance(width, int) or width < 0: + raise TypeError("Memory width must be a non-negative integer, not {!r}" + .format(width)) + if not isinstance(depth, int) or depth < 0: + raise TypeError("Memory depth must be a non-negative integer, not {!r}" + .format(depth)) + + self.name = name or tracer.get_var_name(depth=2, default="$memory") + self.src_loc = tracer.get_src_loc() + + self.width = width + self.depth = depth + self.attrs = OrderedDict(() if attrs is None else attrs) + + # Array of signals for simulation. 
+ self._array = Array() + if simulate: + for addr in range(self.depth): + self._array.append(Signal(self.width, name="{}({})" + .format(name or "memory", addr))) + + self.init = init + + @property + def init(self): + return self._init + + @init.setter + def init(self, new_init): + self._init = [] if new_init is None else list(new_init) + if len(self.init) > self.depth: + raise ValueError("Memory initialization value count exceed memory depth ({} > {})" + .format(len(self.init), self.depth)) + + try: + for addr in range(len(self._array)): + if addr < len(self._init): + self._array[addr].reset = operator.index(self._init[addr]) + else: + self._array[addr].reset = 0 + except TypeError as e: + raise TypeError("Memory initialization value at address {:x}: {}" + .format(addr, e)) from None + + def read_port(self, *, src_loc_at=0, **kwargs): + """Get a read port. + + See :class:`ReadPort` for details. + + Arguments + --------- + domain : str + transparent : bool + + Returns + ------- + An instance of :class:`ReadPort` associated with this memory. + """ + return ReadPort(self, src_loc_at=1 + src_loc_at, **kwargs) + + def write_port(self, *, src_loc_at=0, **kwargs): + """Get a write port. + + See :class:`WritePort` for details. + + Arguments + --------- + domain : str + granularity : int + + Returns + ------- + An instance of :class:`WritePort` associated with this memory. + """ + return WritePort(self, src_loc_at=1 + src_loc_at, **kwargs) + + def __getitem__(self, index): + """Simulation only.""" + return self._array[index] + + +class ReadPort(Elaboratable): + """A memory read port. + + Parameters + ---------- + memory : :class:`Memory` + Memory associated with the port. + domain : str + Clock domain. Defaults to ``"sync"``. If set to ``"comb"``, the port is asynchronous. + Otherwise, the read data becomes available on the next clock cycle. + transparent : bool + Port transparency. 
If set (default), a read at an address that is also being written to in + the same clock cycle will output the new value. Otherwise, the old value will be output + first. This behavior only applies to ports in the same domain. + + Attributes + ---------- + memory : :class:`Memory` + domain : str + transparent : bool + addr : Signal(range(memory.depth)), in + Read address. + data : Signal(memory.width), out + Read data. + en : Signal or Const, in + Read enable. If asserted, ``data`` is updated with the word stored at ``addr``. Note that + transparent ports cannot assign ``en`` (which is hardwired to 1 instead), as doing so is + currently not supported by Yosys. + + Exceptions + ---------- + Raises :exn:`ValueError` if the read port is simultaneously asynchronous and non-transparent. + """ + def __init__(self, memory, *, domain="sync", transparent=True, src_loc_at=0): + if domain == "comb" and not transparent: + raise ValueError("Read port cannot be simultaneously asynchronous and non-transparent") + + self.memory = memory + self.domain = domain + self.transparent = transparent + + self.addr = Signal(range(memory.depth), + name="{}_r_addr".format(memory.name), src_loc_at=1 + src_loc_at) + self.data = Signal(memory.width, + name="{}_r_data".format(memory.name), src_loc_at=1 + src_loc_at) + if self.domain != "comb" and not transparent: + self.en = Signal(name="{}_r_en".format(memory.name), reset=1, + src_loc_at=1 + src_loc_at) + else: + self.en = Const(1) + + def elaborate(self, platform): + f = Instance("$memrd", + p_MEMID=self.memory, + p_ABITS=self.addr.width, + p_WIDTH=self.data.width, + p_CLK_ENABLE=self.domain != "comb", + p_CLK_POLARITY=1, + p_TRANSPARENT=self.transparent, + i_CLK=ClockSignal(self.domain) if self.domain != "comb" else Const(0), + i_EN=self.en, + i_ADDR=self.addr, + o_DATA=self.data, + ) + if self.domain == "comb": + # Asynchronous port + f.add_statements(self.data.eq(self.memory._array[self.addr])) + f.add_driver(self.data) + elif not 
self.transparent: + # Synchronous, read-before-write port + f.add_statements( + Switch(self.en, { + 1: self.data.eq(self.memory._array[self.addr]) + }) + ) + f.add_driver(self.data, self.domain) + else: + # Synchronous, write-through port + # This model is a bit unconventional. We model transparent ports as asynchronous ports + # that are latched when the clock is high. This isn't exactly correct, but it is very + # close to the correct behavior of a transparent port, and the difference should only + # be observable in pathological cases of clock gating. A register is injected to + # the address input to achieve the correct address-to-data latency. Also, the reset + # value of the data output is forcibly set to the 0th initial value, if any--note that + # many FPGAs do not guarantee this behavior! + if len(self.memory.init) > 0: + self.data.reset = operator.index(self.memory.init[0]) + latch_addr = Signal.like(self.addr) + f.add_statements( + latch_addr.eq(self.addr), + Switch(ClockSignal(self.domain), { + 0: self.data.eq(self.data), + 1: self.data.eq(self.memory._array[latch_addr]), + }), + ) + f.add_driver(latch_addr, self.domain) + f.add_driver(self.data) + return f + + +class WritePort(Elaboratable): + """A memory write port. + + Parameters + ---------- + memory : :class:`Memory` + Memory associated with the port. + domain : str + Clock domain. Defaults to ``"sync"``. Writes have a latency of 1 clock cycle. + granularity : int + Port granularity. Defaults to ``memory.width``. Write data is split evenly in + ``memory.width // granularity`` chunks, which can be updated independently. + + Attributes + ---------- + memory : :class:`Memory` + domain : str + granularity : int + addr : Signal(range(memory.depth)), in + Write address. + data : Signal(memory.width), in + Write data. + en : Signal(memory.width // granularity), in + Write enable. 
Each bit selects a non-overlapping chunk of ``granularity`` bits on the + ``data`` signal, which is written to memory at ``addr``. Unselected chunks are ignored. + + Exceptions + ---------- + Raises :exn:`ValueError` if the write port granularity is greater than memory width, or does not + divide memory width evenly. + """ + def __init__(self, memory, *, domain="sync", granularity=None, src_loc_at=0): + if granularity is None: + granularity = memory.width + if not isinstance(granularity, int) or granularity < 0: + raise TypeError("Write port granularity must be a non-negative integer, not {!r}" + .format(granularity)) + if granularity > memory.width: + raise ValueError("Write port granularity must not be greater than memory width " + "({} > {})" + .format(granularity, memory.width)) + if memory.width // granularity * granularity != memory.width: + raise ValueError("Write port granularity must divide memory width evenly") + + self.memory = memory + self.domain = domain + self.granularity = granularity + + self.addr = Signal(range(memory.depth), + name="{}_w_addr".format(memory.name), src_loc_at=1 + src_loc_at) + self.data = Signal(memory.width, + name="{}_w_data".format(memory.name), src_loc_at=1 + src_loc_at) + self.en = Signal(memory.width // granularity, + name="{}_w_en".format(memory.name), src_loc_at=1 + src_loc_at) + + def elaborate(self, platform): + f = Instance("$memwr", + p_MEMID=self.memory, + p_ABITS=self.addr.width, + p_WIDTH=self.data.width, + p_CLK_ENABLE=1, + p_CLK_POLARITY=1, + p_PRIORITY=0, + i_CLK=ClockSignal(self.domain), + i_EN=Cat(Repl(en_bit, self.granularity) for en_bit in self.en), + i_ADDR=self.addr, + i_DATA=self.data, + ) + if len(self.en) > 1: + for index, en_bit in enumerate(self.en): + offset = index * self.granularity + bits = slice(offset, offset + self.granularity) + write_data = self.memory._array[self.addr][bits].eq(self.data[bits]) + f.add_statements(Switch(en_bit, { 1: write_data })) + else: + write_data = 
self.memory._array[self.addr].eq(self.data) + f.add_statements(Switch(self.en, { 1: write_data })) + for signal in self.memory._array: + f.add_driver(signal, self.domain) + return f + + +class DummyPort: + """Dummy memory port. + + This port can be used in place of either a read or a write port for testing and verification. + It does not include any read/write port specific attributes, i.e. none besides ``"domain"``; + any such attributes may be set manually. + """ + def __init__(self, *, data_width, addr_width, domain="sync", name=None, granularity=None): + self.domain = domain + + if granularity is None: + granularity = data_width + if name is None: + name = tracer.get_var_name(depth=2, default="dummy") + + self.addr = Signal(addr_width, + name="{}_addr".format(name), src_loc_at=1) + self.data = Signal(data_width, + name="{}_data".format(name), src_loc_at=1) + self.en = Signal(data_width // granularity, + name="{}_en".format(name), src_loc_at=1) diff --git a/amaranth/hdl/rec.py b/amaranth/hdl/rec.py new file mode 100644 index 0000000..125bb4c --- /dev/null +++ b/amaranth/hdl/rec.py @@ -0,0 +1,278 @@ +from enum import Enum +from collections import OrderedDict +from functools import reduce, wraps + +from .. 
import tracer +from .._utils import union +from .ast import * + + +__all__ = ["Direction", "DIR_NONE", "DIR_FANOUT", "DIR_FANIN", "Layout", "Record"] + + +Direction = Enum('Direction', ('NONE', 'FANOUT', 'FANIN')) + +DIR_NONE = Direction.NONE +DIR_FANOUT = Direction.FANOUT +DIR_FANIN = Direction.FANIN + + +class Layout: + @staticmethod + def cast(obj, *, src_loc_at=0): + if isinstance(obj, Layout): + return obj + return Layout(obj, src_loc_at=1 + src_loc_at) + + def __init__(self, fields, *, src_loc_at=0): + self.fields = OrderedDict() + for field in fields: + if not isinstance(field, tuple) or len(field) not in (2, 3): + raise TypeError("Field {!r} has invalid layout: should be either " + "(name, shape) or (name, shape, direction)" + .format(field)) + if len(field) == 2: + name, shape = field + direction = DIR_NONE + if isinstance(shape, list): + shape = Layout.cast(shape) + else: + name, shape, direction = field + if not isinstance(direction, Direction): + raise TypeError("Field {!r} has invalid direction: should be a Direction " + "instance like DIR_FANIN" + .format(field)) + if not isinstance(name, str): + raise TypeError("Field {!r} has invalid name: should be a string" + .format(field)) + if not isinstance(shape, Layout): + try: + # Check provided shape by calling Shape.cast and checking for exception + Shape.cast(shape, src_loc_at=1 + src_loc_at) + except Exception: + raise TypeError("Field {!r} has invalid shape: should be castable to Shape " + "or a list of fields of a nested record" + .format(field)) + if name in self.fields: + raise NameError("Field {!r} has a name that is already present in the layout" + .format(field)) + self.fields[name] = (shape, direction) + + def __getitem__(self, item): + if isinstance(item, tuple): + return Layout([ + (name, shape, dir) + for (name, (shape, dir)) in self.fields.items() + if name in item + ]) + + return self.fields[item] + + def __iter__(self): + for name, (shape, dir) in self.fields.items(): + yield (name, shape, 
dir) + + def __eq__(self, other): + return self.fields == other.fields + + def __repr__(self): + field_reprs = [] + for name, shape, dir in self: + if dir == DIR_NONE: + field_reprs.append("({!r}, {!r})".format(name, shape)) + else: + field_reprs.append("({!r}, {!r}, Direction.{})".format(name, shape, dir.name)) + return "Layout([{}])".format(", ".join(field_reprs)) + + +class Record(ValueCastable): + @staticmethod + def like(other, *, name=None, name_suffix=None, src_loc_at=0): + if name is not None: + new_name = str(name) + elif name_suffix is not None: + new_name = other.name + str(name_suffix) + else: + new_name = tracer.get_var_name(depth=2 + src_loc_at, default=None) + + def concat(a, b): + if a is None: + return b + return "{}__{}".format(a, b) + + fields = {} + for field_name in other.fields: + field = other[field_name] + if isinstance(field, Record): + fields[field_name] = Record.like(field, name=concat(new_name, field_name), + src_loc_at=1 + src_loc_at) + else: + fields[field_name] = Signal.like(field, name=concat(new_name, field_name), + src_loc_at=1 + src_loc_at) + + return Record(other.layout, name=new_name, fields=fields, src_loc_at=1) + + def __init__(self, layout, *, name=None, fields=None, src_loc_at=0): + if name is None: + name = tracer.get_var_name(depth=2 + src_loc_at, default=None) + + self.name = name + self.src_loc = tracer.get_src_loc(src_loc_at) + + def concat(a, b): + if a is None: + return b + return "{}__{}".format(a, b) + + self.layout = Layout.cast(layout, src_loc_at=1 + src_loc_at) + self.fields = OrderedDict() + for field_name, field_shape, field_dir in self.layout: + if fields is not None and field_name in fields: + field = fields[field_name] + if isinstance(field_shape, Layout): + assert isinstance(field, Record) and field_shape == field.layout + else: + assert isinstance(field, Signal) and Shape.cast(field_shape) == field.shape() + self.fields[field_name] = field + else: + if isinstance(field_shape, Layout): + 
self.fields[field_name] = Record(field_shape, name=concat(name, field_name), + src_loc_at=1 + src_loc_at) + else: + self.fields[field_name] = Signal(field_shape, name=concat(name, field_name), + src_loc_at=1 + src_loc_at) + + def __getattr__(self, name): + return self[name] + + def __getitem__(self, item): + if isinstance(item, str): + try: + return self.fields[item] + except KeyError: + if self.name is None: + reference = "Unnamed record" + else: + reference = "Record '{}'".format(self.name) + raise AttributeError("{} does not have a field '{}'. Did you mean one of: {}?" + .format(reference, item, ", ".join(self.fields))) from None + elif isinstance(item, tuple): + return Record(self.layout[item], fields={ + field_name: field_value + for field_name, field_value in self.fields.items() + if field_name in item + }) + else: + try: + return Value.__getitem__(self, item) + except KeyError: + if self.name is None: + reference = "Unnamed record" + else: + reference = "Record '{}'".format(self.name) + raise AttributeError("{} does not have a field '{}'. Did you mean one of: {}?" 
+ .format(reference, item, ", ".join(self.fields))) from None + + @ValueCastable.lowermethod + def as_value(self): + return Cat(self.fields.values()) + + def __len__(self): + return len(self.as_value()) + + def _lhs_signals(self): + return union((f._lhs_signals() for f in self.fields.values()), start=SignalSet()) + + def _rhs_signals(self): + return union((f._rhs_signals() for f in self.fields.values()), start=SignalSet()) + + def __repr__(self): + fields = [] + for field_name, field in self.fields.items(): + if isinstance(field, Signal): + fields.append(field_name) + else: + fields.append(repr(field)) + name = self.name + if name is None: + name = "" + return "(rec {} {})".format(name, " ".join(fields)) + + def shape(self): + return self.as_value().shape() + + def connect(self, *subordinates, include=None, exclude=None): + def rec_name(record): + if record.name is None: + return "unnamed record" + else: + return "record '{}'".format(record.name) + + for field in include or {}: + if field not in self.fields: + raise AttributeError("Cannot include field '{}' because it is not present in {}" + .format(field, rec_name(self))) + for field in exclude or {}: + if field not in self.fields: + raise AttributeError("Cannot exclude field '{}' because it is not present in {}" + .format(field, rec_name(self))) + + stmts = [] + for field in self.fields: + if include is not None and field not in include: + continue + if exclude is not None and field in exclude: + continue + + shape, direction = self.layout[field] + if not isinstance(shape, Layout) and direction == DIR_NONE: + raise TypeError("Cannot connect field '{}' of {} because it does not have " + "a direction" + .format(field, rec_name(self))) + + item = self.fields[field] + subord_items = [] + for subord in subordinates: + if field not in subord.fields: + raise AttributeError("Cannot connect field '{}' of {} to subordinate {} " + "because the subordinate record does not have this field" + .format(field, rec_name(self), 
rec_name(subord))) + subord_items.append(subord.fields[field]) + + if isinstance(shape, Layout): + sub_include = include[field] if include and field in include else None + sub_exclude = exclude[field] if exclude and field in exclude else None + stmts += item.connect(*subord_items, include=sub_include, exclude=sub_exclude) + else: + if direction == DIR_FANOUT: + stmts += [sub_item.eq(item) for sub_item in subord_items] + if direction == DIR_FANIN: + stmts += [item.eq(reduce(lambda a, b: a | b, subord_items))] + + return stmts + +def _valueproxy(name): + value_func = getattr(Value, name) + @wraps(value_func) + def _wrapper(self, *args, **kwargs): + return value_func(Value.cast(self), *args, **kwargs) + return _wrapper + +for name in [ + "__bool__", + "__invert__", "__neg__", + "__add__", "__radd__", "__sub__", "__rsub__", + "__mul__", "__rmul__", + "__mod__", "__rmod__", "__floordiv__", "__rfloordiv__", + "__lshift__", "__rlshift__", "__rshift__", "__rrshift__", + "__and__", "__rand__", "__xor__", "__rxor__", "__or__", "__ror__", + "__eq__", "__ne__", "__lt__", "__le__", "__gt__", "__ge__", + "__abs__", "__len__", + "as_unsigned", "as_signed", "bool", "any", "all", "xor", "implies", + "bit_select", "word_select", "matches", + "shift_left", "shift_right", "rotate_left", "rotate_right", "eq" + ]: + setattr(Record, name, _valueproxy(name)) + +del _valueproxy +del name diff --git a/amaranth/hdl/xfrm.py b/amaranth/hdl/xfrm.py new file mode 100644 index 0000000..924cb77 --- /dev/null +++ b/amaranth/hdl/xfrm.py @@ -0,0 +1,743 @@ +from abc import ABCMeta, abstractmethod +from collections import OrderedDict +from collections.abc import Iterable + +from .._utils import flatten +from .. 
import tracer +from .ast import * +from .ast import _StatementList +from .cd import * +from .ir import * +from .rec import * + + +__all__ = ["ValueVisitor", "ValueTransformer", + "StatementVisitor", "StatementTransformer", + "FragmentTransformer", + "TransformedElaboratable", + "DomainCollector", "DomainRenamer", "DomainLowerer", + "SampleDomainInjector", "SampleLowerer", + "SwitchCleaner", "LHSGroupAnalyzer", "LHSGroupFilter", + "ResetInserter", "EnableInserter"] + + +class ValueVisitor(metaclass=ABCMeta): + @abstractmethod + def on_Const(self, value): + pass # :nocov: + + @abstractmethod + def on_AnyConst(self, value): + pass # :nocov: + + @abstractmethod + def on_AnySeq(self, value): + pass # :nocov: + + @abstractmethod + def on_Signal(self, value): + pass # :nocov: + + @abstractmethod + def on_ClockSignal(self, value): + pass # :nocov: + + @abstractmethod + def on_ResetSignal(self, value): + pass # :nocov: + + @abstractmethod + def on_Operator(self, value): + pass # :nocov: + + @abstractmethod + def on_Slice(self, value): + pass # :nocov: + + @abstractmethod + def on_Part(self, value): + pass # :nocov: + + @abstractmethod + def on_Cat(self, value): + pass # :nocov: + + @abstractmethod + def on_Repl(self, value): + pass # :nocov: + + @abstractmethod + def on_ArrayProxy(self, value): + pass # :nocov: + + @abstractmethod + def on_Sample(self, value): + pass # :nocov: + + @abstractmethod + def on_Initial(self, value): + pass # :nocov: + + def on_unknown_value(self, value): + raise TypeError("Cannot transform value {!r}".format(value)) # :nocov: + + def replace_value_src_loc(self, value, new_value): + return True + + def on_value(self, value): + if type(value) is Const: + new_value = self.on_Const(value) + elif type(value) is AnyConst: + new_value = self.on_AnyConst(value) + elif type(value) is AnySeq: + new_value = self.on_AnySeq(value) + elif isinstance(value, Signal): + # Uses `isinstance()` and not `type() is` because amaranth.compat requires it. 
+ new_value = self.on_Signal(value) + elif type(value) is ClockSignal: + new_value = self.on_ClockSignal(value) + elif type(value) is ResetSignal: + new_value = self.on_ResetSignal(value) + elif type(value) is Operator: + new_value = self.on_Operator(value) + elif type(value) is Slice: + new_value = self.on_Slice(value) + elif type(value) is Part: + new_value = self.on_Part(value) + elif type(value) is Cat: + new_value = self.on_Cat(value) + elif type(value) is Repl: + new_value = self.on_Repl(value) + elif type(value) is ArrayProxy: + new_value = self.on_ArrayProxy(value) + elif type(value) is Sample: + new_value = self.on_Sample(value) + elif type(value) is Initial: + new_value = self.on_Initial(value) + elif isinstance(value, UserValue): + # Uses `isinstance()` and not `type() is` to allow inheriting. + new_value = self.on_value(value._lazy_lower()) + else: + new_value = self.on_unknown_value(value) + if isinstance(new_value, Value) and self.replace_value_src_loc(value, new_value): + new_value.src_loc = value.src_loc + return new_value + + def __call__(self, value): + return self.on_value(value) + + +class ValueTransformer(ValueVisitor): + def on_Const(self, value): + return value + + def on_AnyConst(self, value): + return value + + def on_AnySeq(self, value): + return value + + def on_Signal(self, value): + return value + + def on_ClockSignal(self, value): + return value + + def on_ResetSignal(self, value): + return value + + def on_Operator(self, value): + return Operator(value.operator, [self.on_value(o) for o in value.operands]) + + def on_Slice(self, value): + return Slice(self.on_value(value.value), value.start, value.stop) + + def on_Part(self, value): + return Part(self.on_value(value.value), self.on_value(value.offset), + value.width, value.stride) + + def on_Cat(self, value): + return Cat(self.on_value(o) for o in value.parts) + + def on_Repl(self, value): + return Repl(self.on_value(value.value), value.count) + + def on_ArrayProxy(self, value): + 
return ArrayProxy([self.on_value(elem) for elem in value._iter_as_values()], + self.on_value(value.index)) + + def on_Sample(self, value): + return Sample(self.on_value(value.value), value.clocks, value.domain) + + def on_Initial(self, value): + return value + + +class StatementVisitor(metaclass=ABCMeta): + @abstractmethod + def on_Assign(self, stmt): + pass # :nocov: + + @abstractmethod + def on_Assert(self, stmt): + pass # :nocov: + + @abstractmethod + def on_Assume(self, stmt): + pass # :nocov: + + @abstractmethod + def on_Cover(self, stmt): + pass # :nocov: + + @abstractmethod + def on_Switch(self, stmt): + pass # :nocov: + + @abstractmethod + def on_statements(self, stmts): + pass # :nocov: + + def on_unknown_statement(self, stmt): + raise TypeError("Cannot transform statement {!r}".format(stmt)) # :nocov: + + def replace_statement_src_loc(self, stmt, new_stmt): + return True + + def on_statement(self, stmt): + if type(stmt) is Assign: + new_stmt = self.on_Assign(stmt) + elif type(stmt) is Assert: + new_stmt = self.on_Assert(stmt) + elif type(stmt) is Assume: + new_stmt = self.on_Assume(stmt) + elif type(stmt) is Cover: + new_stmt = self.on_Cover(stmt) + elif isinstance(stmt, Switch): + # Uses `isinstance()` and not `type() is` because amaranth.compat requires it. 
+ new_stmt = self.on_Switch(stmt) + elif isinstance(stmt, Iterable): + new_stmt = self.on_statements(stmt) + else: + new_stmt = self.on_unknown_statement(stmt) + if isinstance(new_stmt, Statement) and self.replace_statement_src_loc(stmt, new_stmt): + new_stmt.src_loc = stmt.src_loc + if isinstance(new_stmt, Switch) and isinstance(stmt, Switch): + new_stmt.case_src_locs = stmt.case_src_locs + if isinstance(new_stmt, Property): + new_stmt._MustUse__used = True + return new_stmt + + def __call__(self, stmt): + return self.on_statement(stmt) + + +class StatementTransformer(StatementVisitor): + def on_value(self, value): + return value + + def on_Assign(self, stmt): + return Assign(self.on_value(stmt.lhs), self.on_value(stmt.rhs)) + + def on_Assert(self, stmt): + return Assert(self.on_value(stmt.test), _check=stmt._check, _en=stmt._en) + + def on_Assume(self, stmt): + return Assume(self.on_value(stmt.test), _check=stmt._check, _en=stmt._en) + + def on_Cover(self, stmt): + return Cover(self.on_value(stmt.test), _check=stmt._check, _en=stmt._en) + + def on_Switch(self, stmt): + cases = OrderedDict((k, self.on_statement(s)) for k, s in stmt.cases.items()) + return Switch(self.on_value(stmt.test), cases) + + def on_statements(self, stmts): + return _StatementList(flatten(self.on_statement(stmt) for stmt in stmts)) + + +class FragmentTransformer: + def map_subfragments(self, fragment, new_fragment): + for subfragment, name in fragment.subfragments: + new_fragment.add_subfragment(self(subfragment), name) + + def map_ports(self, fragment, new_fragment): + for port, dir in fragment.ports.items(): + new_fragment.add_ports(port, dir=dir) + + def map_named_ports(self, fragment, new_fragment): + if hasattr(self, "on_value"): + for name, (value, dir) in fragment.named_ports.items(): + new_fragment.named_ports[name] = self.on_value(value), dir + else: + new_fragment.named_ports = OrderedDict(fragment.named_ports.items()) + + def map_domains(self, fragment, new_fragment): + for domain 
in fragment.iter_domains(): + new_fragment.add_domains(fragment.domains[domain]) + + def map_statements(self, fragment, new_fragment): + if hasattr(self, "on_statement"): + new_fragment.add_statements(map(self.on_statement, fragment.statements)) + else: + new_fragment.add_statements(fragment.statements) + + def map_drivers(self, fragment, new_fragment): + for domain, signal in fragment.iter_drivers(): + new_fragment.add_driver(signal, domain) + + def on_fragment(self, fragment): + if isinstance(fragment, Instance): + new_fragment = Instance(fragment.type) + new_fragment.parameters = OrderedDict(fragment.parameters) + self.map_named_ports(fragment, new_fragment) + else: + new_fragment = Fragment() + new_fragment.flatten = fragment.flatten + new_fragment.attrs = OrderedDict(fragment.attrs) + self.map_ports(fragment, new_fragment) + self.map_subfragments(fragment, new_fragment) + self.map_domains(fragment, new_fragment) + self.map_statements(fragment, new_fragment) + self.map_drivers(fragment, new_fragment) + return new_fragment + + def __call__(self, value, *, src_loc_at=0): + if isinstance(value, Fragment): + return self.on_fragment(value) + elif isinstance(value, TransformedElaboratable): + value._transforms_.append(self) + return value + elif hasattr(value, "elaborate"): + value = TransformedElaboratable(value, src_loc_at=1 + src_loc_at) + value._transforms_.append(self) + return value + else: + raise AttributeError("Object {!r} cannot be elaborated".format(value)) + + +class TransformedElaboratable(Elaboratable): + def __init__(self, elaboratable, *, src_loc_at=0): + assert hasattr(elaboratable, "elaborate") + + # Fields prefixed and suffixed with underscore to avoid as many conflicts with the inner + # object as possible, since we're forwarding attribute requests to it. 
+ self._elaboratable_ = elaboratable + self._transforms_ = [] + + def __getattr__(self, attr): + return getattr(self._elaboratable_, attr) + + def elaborate(self, platform): + fragment = Fragment.get(self._elaboratable_, platform) + for transform in self._transforms_: + fragment = transform(fragment) + return fragment + + +class DomainCollector(ValueVisitor, StatementVisitor): + def __init__(self): + self.used_domains = set() + self.defined_domains = set() + self._local_domains = set() + + def _add_used_domain(self, domain_name): + if domain_name is None: + return + if domain_name in self._local_domains: + return + self.used_domains.add(domain_name) + + def on_ignore(self, value): + pass + + on_Const = on_ignore + on_AnyConst = on_ignore + on_AnySeq = on_ignore + on_Signal = on_ignore + + def on_ClockSignal(self, value): + self._add_used_domain(value.domain) + + def on_ResetSignal(self, value): + self._add_used_domain(value.domain) + + def on_Operator(self, value): + for o in value.operands: + self.on_value(o) + + def on_Slice(self, value): + self.on_value(value.value) + + def on_Part(self, value): + self.on_value(value.value) + self.on_value(value.offset) + + def on_Cat(self, value): + for o in value.parts: + self.on_value(o) + + def on_Repl(self, value): + self.on_value(value.value) + + def on_ArrayProxy(self, value): + for elem in value._iter_as_values(): + self.on_value(elem) + self.on_value(value.index) + + def on_Sample(self, value): + self.on_value(value.value) + + def on_Initial(self, value): + pass + + def on_Assign(self, stmt): + self.on_value(stmt.lhs) + self.on_value(stmt.rhs) + + def on_property(self, stmt): + self.on_value(stmt.test) + + on_Assert = on_property + on_Assume = on_property + on_Cover = on_property + + def on_Switch(self, stmt): + self.on_value(stmt.test) + for stmts in stmt.cases.values(): + self.on_statement(stmts) + + def on_statements(self, stmts): + for stmt in stmts: + self.on_statement(stmt) + + def on_fragment(self, fragment): + 
if isinstance(fragment, Instance): + for name, (value, dir) in fragment.named_ports.items(): + self.on_value(value) + + old_local_domains, self._local_domains = self._local_domains, set(self._local_domains) + for domain_name, domain in fragment.domains.items(): + if domain.local: + self._local_domains.add(domain_name) + else: + self.defined_domains.add(domain_name) + + self.on_statements(fragment.statements) + for domain_name in fragment.drivers: + self._add_used_domain(domain_name) + for subfragment, name in fragment.subfragments: + self.on_fragment(subfragment) + + self._local_domains = old_local_domains + + def __call__(self, fragment): + self.on_fragment(fragment) + + +class DomainRenamer(FragmentTransformer, ValueTransformer, StatementTransformer): + def __init__(self, domain_map): + if isinstance(domain_map, str): + domain_map = {"sync": domain_map} + for src, dst in domain_map.items(): + if src == "comb": + raise ValueError("Domain '{}' may not be renamed".format(src)) + if dst == "comb": + raise ValueError("Domain '{}' may not be renamed to '{}'".format(src, dst)) + self.domain_map = OrderedDict(domain_map) + + def on_ClockSignal(self, value): + if value.domain in self.domain_map: + return ClockSignal(self.domain_map[value.domain]) + return value + + def on_ResetSignal(self, value): + if value.domain in self.domain_map: + return ResetSignal(self.domain_map[value.domain], + allow_reset_less=value.allow_reset_less) + return value + + def map_domains(self, fragment, new_fragment): + for domain in fragment.iter_domains(): + cd = fragment.domains[domain] + if domain in self.domain_map: + if cd.name == domain: + # Rename the actual ClockDomain object. 
+ cd.rename(self.domain_map[domain]) + else: + assert cd.name == self.domain_map[domain] + new_fragment.add_domains(cd) + + def map_drivers(self, fragment, new_fragment): + for domain, signals in fragment.drivers.items(): + if domain in self.domain_map: + domain = self.domain_map[domain] + for signal in signals: + new_fragment.add_driver(self.on_value(signal), domain) + + +class DomainLowerer(FragmentTransformer, ValueTransformer, StatementTransformer): + def __init__(self, domains=None): + self.domains = domains + + def _resolve(self, domain, context): + if domain not in self.domains: + raise DomainError("Signal {!r} refers to nonexistent domain '{}'" + .format(context, domain)) + return self.domains[domain] + + def map_drivers(self, fragment, new_fragment): + for domain, signal in fragment.iter_drivers(): + new_fragment.add_driver(self.on_value(signal), domain) + + def replace_value_src_loc(self, value, new_value): + return not isinstance(value, (ClockSignal, ResetSignal)) + + def on_ClockSignal(self, value): + domain = self._resolve(value.domain, value) + return domain.clk + + def on_ResetSignal(self, value): + domain = self._resolve(value.domain, value) + if domain.rst is None: + if value.allow_reset_less: + return Const(0) + else: + raise DomainError("Signal {!r} refers to reset of reset-less domain '{}'" + .format(value, value.domain)) + return domain.rst + + def _insert_resets(self, fragment): + for domain_name, signals in fragment.drivers.items(): + if domain_name is None: + continue + domain = fragment.domains[domain_name] + if domain.rst is None: + continue + stmts = [signal.eq(Const(signal.reset, signal.width)) + for signal in signals if not signal.reset_less] + fragment.add_statements(Switch(domain.rst, {1: stmts})) + + def on_fragment(self, fragment): + self.domains = fragment.domains + new_fragment = super().on_fragment(fragment) + self._insert_resets(new_fragment) + return new_fragment + + +class SampleDomainInjector(ValueTransformer, 
StatementTransformer): + def __init__(self, domain): + self.domain = domain + + def on_Sample(self, value): + if value.domain is not None: + return value + return Sample(value.value, value.clocks, self.domain) + + def __call__(self, stmts): + return self.on_statement(stmts) + + +class SampleLowerer(FragmentTransformer, ValueTransformer, StatementTransformer): + def __init__(self): + self.initial = None + self.sample_cache = None + self.sample_stmts = None + + def _name_reset(self, value): + if isinstance(value, Const): + return "c${}".format(value.value), value.value + elif isinstance(value, Signal): + return "s${}".format(value.name), value.reset + elif isinstance(value, ClockSignal): + return "clk", 0 + elif isinstance(value, ResetSignal): + return "rst", 1 + elif isinstance(value, Initial): + return "init", 0 # Past(Initial()) produces 0, 1, 0, 0, ... + else: + raise NotImplementedError # :nocov: + + def on_Sample(self, value): + if value in self.sample_cache: + return self.sample_cache[value] + + sampled_value = self.on_value(value.value) + if value.clocks == 0: + sample = sampled_value + else: + assert value.domain is not None + sampled_name, sampled_reset = self._name_reset(value.value) + name = "$sample${}${}${}".format(sampled_name, value.domain, value.clocks) + sample = Signal.like(value.value, name=name, reset_less=True, reset=sampled_reset) + sample.attrs["amaranth.sample_reg"] = True + + prev_sample = self.on_Sample(Sample(sampled_value, value.clocks - 1, value.domain)) + if value.domain not in self.sample_stmts: + self.sample_stmts[value.domain] = [] + self.sample_stmts[value.domain].append(sample.eq(prev_sample)) + + self.sample_cache[value] = sample + return sample + + def on_Initial(self, value): + if self.initial is None: + self.initial = Signal(name="init") + return self.initial + + def map_statements(self, fragment, new_fragment): + self.initial = None + self.sample_cache = ValueDict() + self.sample_stmts = OrderedDict() + 
new_fragment.add_statements(map(self.on_statement, fragment.statements)) + for domain, stmts in self.sample_stmts.items(): + new_fragment.add_statements(stmts) + for stmt in stmts: + new_fragment.add_driver(stmt.lhs, domain) + if self.initial is not None: + new_fragment.add_subfragment(Instance("$initstate", o_Y=self.initial)) + + +class SwitchCleaner(StatementVisitor): + def on_ignore(self, stmt): + return stmt + + on_Assign = on_ignore + on_Assert = on_ignore + on_Assume = on_ignore + on_Cover = on_ignore + + def on_Switch(self, stmt): + cases = OrderedDict((k, self.on_statement(s)) for k, s in stmt.cases.items()) + if any(len(s) for s in cases.values()): + return Switch(stmt.test, cases) + + def on_statements(self, stmts): + stmts = flatten(self.on_statement(stmt) for stmt in stmts) + return _StatementList(stmt for stmt in stmts if stmt is not None) + + +class LHSGroupAnalyzer(StatementVisitor): + def __init__(self): + self.signals = SignalDict() + self.unions = OrderedDict() + + def find(self, signal): + if signal not in self.signals: + self.signals[signal] = len(self.signals) + group = self.signals[signal] + while group in self.unions: + group = self.unions[group] + self.signals[signal] = group + return group + + def unify(self, root, *leaves): + root_group = self.find(root) + for leaf in leaves: + leaf_group = self.find(leaf) + if root_group == leaf_group: + continue + self.unions[leaf_group] = root_group + + def groups(self): + groups = OrderedDict() + for signal in self.signals: + group = self.find(signal) + if group not in groups: + groups[group] = SignalSet() + groups[group].add(signal) + return groups + + def on_Assign(self, stmt): + lhs_signals = stmt._lhs_signals() + if lhs_signals: + self.unify(*stmt._lhs_signals()) + + def on_property(self, stmt): + lhs_signals = stmt._lhs_signals() + if lhs_signals: + self.unify(*stmt._lhs_signals()) + + on_Assert = on_property + on_Assume = on_property + on_Cover = on_property + + def on_Switch(self, stmt): + for 
case_stmts in stmt.cases.values(): + self.on_statements(case_stmts) + + def on_statements(self, stmts): + for stmt in stmts: + self.on_statement(stmt) + + def __call__(self, stmts): + self.on_statements(stmts) + return self.groups() + + +class LHSGroupFilter(SwitchCleaner): + def __init__(self, signals): + self.signals = signals + + def on_Assign(self, stmt): + # The invariant provided by LHSGroupAnalyzer is that all signals that ever appear together + # on LHS are a part of the same group, so it is sufficient to check any of them. + lhs_signals = stmt.lhs._lhs_signals() + if lhs_signals: + any_lhs_signal = next(iter(lhs_signals)) + if any_lhs_signal in self.signals: + return stmt + + def on_property(self, stmt): + any_lhs_signal = next(iter(stmt._lhs_signals())) + if any_lhs_signal in self.signals: + return stmt + + on_Assert = on_property + on_Assume = on_property + on_Cover = on_property + + +class _ControlInserter(FragmentTransformer): + def __init__(self, controls): + self.src_loc = None + if isinstance(controls, Value): + controls = {"sync": controls} + self.controls = OrderedDict(controls) + + def on_fragment(self, fragment): + new_fragment = super().on_fragment(fragment) + for domain, signals in fragment.drivers.items(): + if domain is None or domain not in self.controls: + continue + self._insert_control(new_fragment, domain, signals) + return new_fragment + + def _insert_control(self, fragment, domain, signals): + raise NotImplementedError # :nocov: + + def __call__(self, value, *, src_loc_at=0): + self.src_loc = tracer.get_src_loc(src_loc_at=src_loc_at) + return super().__call__(value, src_loc_at=1 + src_loc_at) + + +class ResetInserter(_ControlInserter): + def _insert_control(self, fragment, domain, signals): + stmts = [s.eq(Const(s.reset, s.width)) for s in signals if not s.reset_less] + fragment.add_statements(Switch(self.controls[domain], {1: stmts}, src_loc=self.src_loc)) + + +class EnableInserter(_ControlInserter): + def _insert_control(self, 
fragment, domain, signals): + stmts = [s.eq(s) for s in signals] + fragment.add_statements(Switch(self.controls[domain], {0: stmts}, src_loc=self.src_loc)) + + def on_fragment(self, fragment): + new_fragment = super().on_fragment(fragment) + if isinstance(new_fragment, Instance) and new_fragment.type in ("$memrd", "$memwr"): + clk_port, clk_dir = new_fragment.named_ports["CLK"] + if isinstance(clk_port, ClockSignal) and clk_port.domain in self.controls: + en_port, en_dir = new_fragment.named_ports["EN"] + en_port = Mux(self.controls[clk_port.domain], en_port, Const(0, len(en_port))) + new_fragment.named_ports["EN"] = en_port, en_dir + return new_fragment diff --git a/amaranth/lib/__init__.py b/amaranth/lib/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/amaranth/lib/cdc.py b/amaranth/lib/cdc.py new file mode 100644 index 0000000..9278c8e --- /dev/null +++ b/amaranth/lib/cdc.py @@ -0,0 +1,267 @@ +from .. import * + + +__all__ = ["FFSynchronizer", "AsyncFFSynchronizer", "ResetSynchronizer", "PulseSynchronizer"] + + +def _check_stages(stages): + if not isinstance(stages, int) or stages < 1: + raise TypeError("Synchronization stage count must be a positive integer, not {!r}" + .format(stages)) + if stages < 2: + raise ValueError("Synchronization stage count may not safely be less than 2") + + +class FFSynchronizer(Elaboratable): + """Resynchronise a signal to a different clock domain. + + Consists of a chain of flip-flops. Eliminates metastabilities at the output, but provides + no other guarantee as to the safe domain-crossing of a signal. + + Parameters + ---------- + i : Signal(n), in + Signal to be resynchronised. + o : Signal(n), out + Signal connected to synchroniser output. + o_domain : str + Name of output clock domain. + reset : int + Reset value of the flip-flops. On FPGAs, even if ``reset_less`` is True, + the :class:`FFSynchronizer` is still set to this value during initialization. 
+ reset_less : bool + If ``True`` (the default), this :class:`FFSynchronizer` is unaffected by ``o_domain`` + reset. See "Note on Reset" below. + stages : int + Number of synchronization stages between input and output. The lowest safe number is 2, + with higher numbers reducing MTBF further, at the cost of increased latency. + max_input_delay : None or float + Maximum delay from the input signal's clock to the first synchronization stage, in seconds. + If specified and the platform does not support it, elaboration will fail. + + Platform override + ----------------- + Define the ``get_ff_sync`` platform method to override the implementation of + :class:`FFSynchronizer`, e.g. to instantiate library cells directly. + + Note on Reset + ------------- + :class:`FFSynchronizer` is non-resettable by default. Usually this is the safest option; + on FPGAs the :class:`FFSynchronizer` will still be initialized to its ``reset`` value when + the FPGA loads its configuration. + + However, in designs where the value of the :class:`FFSynchronizer` must be valid immediately + after reset, consider setting ``reset_less`` to False if any of the following is true: + + - You are targeting an ASIC, or an FPGA that does not allow arbitrary initial flip-flop states; + - Your design features warm (non-power-on) resets of ``o_domain``, so the one-time + initialization at power on is insufficient; + - Your design features a sequenced reset, and the :class:`FFSynchronizer` must maintain + its reset value until ``o_domain`` reset specifically is deasserted. + + :class:`FFSynchronizer` is reset by the ``o_domain`` reset only. 
+ """ + def __init__(self, i, o, *, o_domain="sync", reset=0, reset_less=True, stages=2, + max_input_delay=None): + _check_stages(stages) + + self.i = i + self.o = o + + self._reset = reset + self._reset_less = reset_less + self._o_domain = o_domain + self._stages = stages + + self._max_input_delay = max_input_delay + + def elaborate(self, platform): + if hasattr(platform, "get_ff_sync"): + return platform.get_ff_sync(self) + + if self._max_input_delay is not None: + raise NotImplementedError("Platform '{}' does not support constraining input delay " + "for FFSynchronizer" + .format(type(platform).__name__)) + + m = Module() + flops = [Signal(self.i.shape(), name="stage{}".format(index), + reset=self._reset, reset_less=self._reset_less) + for index in range(self._stages)] + for i, o in zip((self.i, *flops), flops): + m.d[self._o_domain] += o.eq(i) + m.d.comb += self.o.eq(flops[-1]) + return m + + +class AsyncFFSynchronizer(Elaboratable): + """Synchronize deassertion of an asynchronous signal. + + The signal driven by the :class:`AsyncFFSynchronizer` is asserted asynchronously and deasserted + synchronously, eliminating metastability during deassertion. + + This synchronizer is primarily useful for resets and reset-like signals. + + Parameters + ---------- + i : Signal(1), in + Asynchronous input signal, to be synchronized. + o : Signal(1), out + Synchronously released output signal. + o_domain : str + Name of clock domain to synchronize to. + stages : int, >=2 + Number of synchronization stages between input and output. The lowest safe number is 2, + with higher numbers reducing MTBF further, at the cost of increased deassertion latency. + async_edge : str + The edge of the input signal which causes the output to be set. Must be one of "pos" or "neg". + max_input_delay : None or float + Maximum delay from the input signal's clock to the first synchronization stage, in seconds. + If specified and the platform does not support it, elaboration will fail. 
+ + Platform override + ----------------- + Define the ``get_async_ff_sync`` platform method to override the implementation of + :class:`AsyncFFSynchronizer`, e.g. to instantiate library cells directly. + """ + def __init__(self, i, o, *, o_domain="sync", stages=2, async_edge="pos", max_input_delay=None): + _check_stages(stages) + + if len(i) != 1: + raise ValueError("AsyncFFSynchronizer input width must be 1, not {}" + .format(len(i))) + if len(o) != 1: + raise ValueError("AsyncFFSynchronizer output width must be 1, not {}" + .format(len(o))) + + if async_edge not in ("pos", "neg"): + raise ValueError("AsyncFFSynchronizer async edge must be one of 'pos' or 'neg', " + "not {!r}" + .format(async_edge)) + + self.i = i + self.o = o + + self._o_domain = o_domain + self._stages = stages + + self._edge = async_edge + + self._max_input_delay = max_input_delay + + def elaborate(self, platform): + if hasattr(platform, "get_async_ff_sync"): + return platform.get_async_ff_sync(self) + + if self._max_input_delay is not None: + raise NotImplementedError("Platform '{}' does not support constraining input delay " + "for AsyncFFSynchronizer" + .format(type(platform).__name__)) + + m = Module() + m.domains += ClockDomain("async_ff", async_reset=True, local=True) + flops = [Signal(1, name="stage{}".format(index), reset=1) + for index in range(self._stages)] + for i, o in zip((0, *flops), flops): + m.d.async_ff += o.eq(i) + + if self._edge == "pos": + m.d.comb += ResetSignal("async_ff").eq(self.i) + else: + m.d.comb += ResetSignal("async_ff").eq(~self.i) + + m.d.comb += [ + ClockSignal("async_ff").eq(ClockSignal(self._o_domain)), + self.o.eq(flops[-1]) + ] + + return m + + +class ResetSynchronizer(Elaboratable): + """Synchronize deassertion of a clock domain reset. + + The reset of the clock domain driven by the :class:`ResetSynchronizer` is asserted + asynchronously and deasserted synchronously, eliminating metastability during deassertion. 
+ + The driven clock domain could use a reset that is asserted either synchronously or + asynchronously; a reset is always deasserted synchronously. A domain with an asynchronously + asserted reset is useful if the clock of the domain may be gated, yet the domain still + needs to be reset promptly; otherwise, synchronously asserted reset (the default) should + be used. + + Parameters + ---------- + arst : Signal(1), in + Asynchronous reset signal, to be synchronized. + domain : str + Name of clock domain to reset. + stages : int, >=2 + Number of synchronization stages between input and output. The lowest safe number is 2, + with higher numbers reducing MTBF further, at the cost of increased deassertion latency. + max_input_delay : None or float + Maximum delay from the input signal's clock to the first synchronization stage, in seconds. + If specified and the platform does not support it, elaboration will fail. + + Platform override + ----------------- + Define the ``get_reset_sync`` platform method to override the implementation of + :class:`ResetSynchronizer`, e.g. to instantiate library cells directly. + """ + def __init__(self, arst, *, domain="sync", stages=2, max_input_delay=None): + _check_stages(stages) + + self.arst = arst + + self._domain = domain + self._stages = stages + + self._max_input_delay = max_input_delay + + def elaborate(self, platform): + return AsyncFFSynchronizer(self.arst, ResetSignal(self._domain), o_domain=self._domain, + stages=self._stages, max_input_delay=self._max_input_delay) + + +class PulseSynchronizer(Elaboratable): + """A one-clock pulse on the input produces a one-clock pulse on the output. + + If the output clock is faster than the input clock, then the input may be safely asserted at + 100% duty cycle. Otherwise, if the clock ratio is `n`:1, the input may be asserted at most once + in every `n` input clocks, else pulses may be dropped. Other than this there is no constraint + on the ratio of input and output clock frequency. 
+ + Parameters + ---------- + i_domain : str + Name of input clock domain. + o_domain : str + Name of output clock domain. + stages : int, >=2 + Number of synchronization stages between input and output. The lowest safe number is 2, + with higher numbers reducing MTBF further, at the cost of increased deassertion latency. + """ + def __init__(self, i_domain, o_domain, *, stages=2): + _check_stages(stages) + + self.i = Signal() + self.o = Signal() + + self._i_domain = i_domain + self._o_domain = o_domain + self._stages = stages + + def elaborate(self, platform): + m = Module() + + i_toggle = Signal() + o_toggle = Signal() + r_toggle = Signal() + ff_sync = m.submodules.ff_sync = \ + FFSynchronizer(i_toggle, o_toggle, o_domain=self._o_domain, stages=self._stages) + + m.d[self._i_domain] += i_toggle.eq(i_toggle ^ self.i) + m.d[self._o_domain] += r_toggle.eq(o_toggle) + m.d.comb += self.o.eq(o_toggle ^ r_toggle) + + return m diff --git a/amaranth/lib/coding.py b/amaranth/lib/coding.py new file mode 100644 index 0000000..5abfbd7 --- /dev/null +++ b/amaranth/lib/coding.py @@ -0,0 +1,186 @@ +"""Encoders and decoders between binary and one-hot representation.""" + +from .. import * + + +__all__ = [ + "Encoder", "Decoder", + "PriorityEncoder", "PriorityDecoder", + "GrayEncoder", "GrayDecoder", +] + + +class Encoder(Elaboratable): + """Encode one-hot to binary. + + If one bit in ``i`` is asserted, ``n`` is low and ``o`` indicates the asserted bit. + Otherwise, ``n`` is high and ``o`` is ``0``. + + Parameters + ---------- + width : int + Bit width of the input + + Attributes + ---------- + i : Signal(width), in + One-hot input. + o : Signal(range(width)), out + Encoded binary. + n : Signal, out + Invalid: either none or multiple input bits are asserted. 
+ """ + def __init__(self, width): + self.width = width + + self.i = Signal(width) + self.o = Signal(range(width)) + self.n = Signal() + + def elaborate(self, platform): + m = Module() + with m.Switch(self.i): + for j in range(self.width): + with m.Case(1 << j): + m.d.comb += self.o.eq(j) + with m.Case(): + m.d.comb += self.n.eq(1) + return m + + +class PriorityEncoder(Elaboratable): + """Priority encode requests to binary. + + If any bit in ``i`` is asserted, ``n`` is low and ``o`` indicates the least significant + asserted bit. + Otherwise, ``n`` is high and ``o`` is ``0``. + + Parameters + ---------- + width : int + Bit width of the input. + + Attributes + ---------- + i : Signal(width), in + Input requests. + o : Signal(range(width)), out + Encoded binary. + n : Signal, out + Invalid: no input bits are asserted. + """ + def __init__(self, width): + self.width = width + + self.i = Signal(width) + self.o = Signal(range(width)) + self.n = Signal() + + def elaborate(self, platform): + m = Module() + for j in reversed(range(self.width)): + with m.If(self.i[j]): + m.d.comb += self.o.eq(j) + m.d.comb += self.n.eq(self.i == 0) + return m + + +class Decoder(Elaboratable): + """Decode binary to one-hot. + + If ``n`` is low, only the ``i``th bit in ``o`` is asserted. + If ``n`` is high, ``o`` is ``0``. + + Parameters + ---------- + width : int + Bit width of the output. + + Attributes + ---------- + i : Signal(range(width)), in + Input binary. + o : Signal(width), out + Decoded one-hot. + n : Signal, in + Invalid, no output bits are to be asserted. + """ + def __init__(self, width): + self.width = width + + self.i = Signal(range(width)) + self.n = Signal() + self.o = Signal(width) + + def elaborate(self, platform): + m = Module() + with m.Switch(self.i): + for j in range(len(self.o)): + with m.Case(j): + m.d.comb += self.o.eq(1 << j) + with m.If(self.n): + m.d.comb += self.o.eq(0) + return m + + +class PriorityDecoder(Decoder): + """Decode binary to priority request. 
+ + Identical to :class:`Decoder`. + """ + + +class GrayEncoder(Elaboratable): + """Encode binary to Gray code. + + Parameters + ---------- + width : int + Bit width. + + Attributes + ---------- + i : Signal(width), in + Input natural binary. + o : Signal(width), out + Encoded Gray code. + """ + def __init__(self, width): + self.width = width + + self.i = Signal(width) + self.o = Signal(width) + + def elaborate(self, platform): + m = Module() + m.d.comb += self.o.eq(self.i ^ self.i[1:]) + return m + + +class GrayDecoder(Elaboratable): + """Decode Gray code to binary. + + Parameters + ---------- + width : int + Bit width. + + Attributes + ---------- + i : Signal(width), in + Input Gray code. + o : Signal(width), out + Decoded natural binary. + """ + def __init__(self, width): + self.width = width + + self.i = Signal(width) + self.o = Signal(width) + + def elaborate(self, platform): + m = Module() + m.d.comb += self.o[-1].eq(self.i[-1]) + for i in reversed(range(self.width - 1)): + m.d.comb += self.o[i].eq(self.o[i + 1] ^ self.i[i]) + return m diff --git a/amaranth/lib/fifo.py b/amaranth/lib/fifo.py new file mode 100644 index 0000000..eaabd6f --- /dev/null +++ b/amaranth/lib/fifo.py @@ -0,0 +1,529 @@ +"""First-in first-out queues.""" + +from .. import * +from ..asserts import * +from .._utils import log2_int +from .coding import GrayEncoder, GrayDecoder +from .cdc import FFSynchronizer, AsyncFFSynchronizer + + +__all__ = ["FIFOInterface", "SyncFIFO", "SyncFIFOBuffered", "AsyncFIFO", "AsyncFIFOBuffered"] + + +class FIFOInterface: + _doc_template = """ + {description} + + Parameters + ---------- + width : int + Bit width of data entries. + depth : int + Depth of the queue. If zero, the FIFO cannot be read from or written to. + {parameters} + + Attributes + ---------- + {attributes} + w_data : in, width + Input data. + w_rdy : out + Asserted if there is space in the queue, i.e. ``w_en`` can be asserted to write + a new entry. + w_en : in + Write strobe. 
Latches ``w_data`` into the queue. Does nothing if ``w_rdy`` is not asserted.
+    w_level : out
+        Number of unread entries.
+    {w_attributes}
+    r_data : out, width
+        Output data. {r_data_valid}
+    r_rdy : out
+        Asserted if there is an entry in the queue, i.e. ``r_en`` can be asserted to read
+        an existing entry.
+    r_en : in
+        Read strobe. Makes the next entry (if any) available on ``r_data`` at the next cycle.
+        Does nothing if ``r_rdy`` is not asserted.
+    r_level : out
+        Number of unread entries.
+    {r_attributes}
+    """
+
+    __doc__ = _doc_template.format(description="""
+    Data written to the input interface (``w_data``, ``w_rdy``, ``w_en``) is buffered and can be
+    read at the output interface (``r_data``, ``r_rdy``, ``r_en``). The data entry written first
+    to the input also appears first on the output.
+    """,
+    parameters="",
+    r_data_valid="The conditions in which ``r_data`` is valid depend on the type of the queue.",
+    attributes="""
+    fwft : bool
+        First-word fallthrough. If set, when ``r_rdy`` rises, the first entry is already
+        available, i.e. ``r_data`` is valid. Otherwise, after ``r_rdy`` rises, it is necessary
+        to strobe ``r_en`` for ``r_data`` to become valid.
+ """.strip(), + w_attributes="", + r_attributes="") + + def __init__(self, *, width, depth, fwft): + if not isinstance(width, int) or width < 0: + raise TypeError("FIFO width must be a non-negative integer, not {!r}" + .format(width)) + if not isinstance(depth, int) or depth < 0: + raise TypeError("FIFO depth must be a non-negative integer, not {!r}" + .format(depth)) + self.width = width + self.depth = depth + self.fwft = fwft + + self.w_data = Signal(width, reset_less=True) + self.w_rdy = Signal() # writable; not full + self.w_en = Signal() + self.w_level = Signal(range(depth + 1)) + + self.r_data = Signal(width, reset_less=True) + self.r_rdy = Signal() # readable; not empty + self.r_en = Signal() + self.r_level = Signal(range(depth + 1)) + + +def _incr(signal, modulo): + if modulo == 2 ** len(signal): + return signal + 1 + else: + return Mux(signal == modulo - 1, 0, signal + 1) + + +class SyncFIFO(Elaboratable, FIFOInterface): + __doc__ = FIFOInterface._doc_template.format( + description=""" + Synchronous first in, first out queue. + + Read and write interfaces are accessed from the same clock domain. If different clock domains + are needed, use :class:`AsyncFIFO`. + """.strip(), + parameters=""" + fwft : bool + First-word fallthrough. If set, when the queue is empty and an entry is written into it, + that entry becomes available on the output on the same clock cycle. Otherwise, it is + necessary to assert ``r_en`` for ``r_data`` to become valid. + """.strip(), + r_data_valid=""" + For FWFT queues, valid if ``r_rdy`` is asserted. For non-FWFT queues, valid on the next + cycle after ``r_rdy`` and ``r_en`` have been asserted. 
+ """.strip(), + attributes="", + r_attributes="", + w_attributes="") + + def __init__(self, *, width, depth, fwft=True): + super().__init__(width=width, depth=depth, fwft=fwft) + + self.level = Signal(range(depth + 1)) + + def elaborate(self, platform): + m = Module() + if self.depth == 0: + m.d.comb += [ + self.w_rdy.eq(0), + self.r_rdy.eq(0), + ] + return m + + m.d.comb += [ + self.w_rdy.eq(self.level != self.depth), + self.r_rdy.eq(self.level != 0), + self.w_level.eq(self.level), + self.r_level.eq(self.level), + ] + + do_read = self.r_rdy & self.r_en + do_write = self.w_rdy & self.w_en + + storage = Memory(width=self.width, depth=self.depth) + w_port = m.submodules.w_port = storage.write_port() + r_port = m.submodules.r_port = storage.read_port( + domain="comb" if self.fwft else "sync", transparent=self.fwft) + produce = Signal(range(self.depth)) + consume = Signal(range(self.depth)) + + m.d.comb += [ + w_port.addr.eq(produce), + w_port.data.eq(self.w_data), + w_port.en.eq(self.w_en & self.w_rdy), + ] + with m.If(do_write): + m.d.sync += produce.eq(_incr(produce, self.depth)) + + m.d.comb += [ + r_port.addr.eq(consume), + self.r_data.eq(r_port.data), + ] + if not self.fwft: + m.d.comb += r_port.en.eq(self.r_en) + with m.If(do_read): + m.d.sync += consume.eq(_incr(consume, self.depth)) + + with m.If(do_write & ~do_read): + m.d.sync += self.level.eq(self.level + 1) + with m.If(do_read & ~do_write): + m.d.sync += self.level.eq(self.level - 1) + + if platform == "formal": + # TODO: move this logic to SymbiYosys + with m.If(Initial()): + m.d.comb += [ + Assume(produce < self.depth), + Assume(consume < self.depth), + ] + with m.If(produce == consume): + m.d.comb += Assume((self.level == 0) | (self.level == self.depth)) + with m.If(produce > consume): + m.d.comb += Assume(self.level == (produce - consume)) + with m.If(produce < consume): + m.d.comb += Assume(self.level == (self.depth + produce - consume)) + with m.Else(): + m.d.comb += [ + Assert(produce < 
self.depth), + Assert(consume < self.depth), + ] + with m.If(produce == consume): + m.d.comb += Assert((self.level == 0) | (self.level == self.depth)) + with m.If(produce > consume): + m.d.comb += Assert(self.level == (produce - consume)) + with m.If(produce < consume): + m.d.comb += Assert(self.level == (self.depth + produce - consume)) + + return m + + +class SyncFIFOBuffered(Elaboratable, FIFOInterface): + __doc__ = FIFOInterface._doc_template.format( + description=""" + Buffered synchronous first in, first out queue. + + This queue's interface is identical to :class:`SyncFIFO` configured as ``fwft=True``, but it + does not use asynchronous memory reads, which are incompatible with FPGA block RAMs. + + In exchange, the latency between an entry being written to an empty queue and that entry + becoming available on the output is increased by one cycle compared to :class:`SyncFIFO`. + """.strip(), + parameters=""" + fwft : bool + Always set. + """.strip(), + attributes="", + r_data_valid="Valid if ``r_rdy`` is asserted.", + r_attributes=""" + level : out + Number of unread entries. + """.strip(), + w_attributes="") + + def __init__(self, *, width, depth): + super().__init__(width=width, depth=depth, fwft=True) + + self.level = Signal(range(depth + 1)) + + def elaborate(self, platform): + m = Module() + if self.depth == 0: + m.d.comb += [ + self.w_rdy.eq(0), + self.r_rdy.eq(0), + ] + return m + + # Effectively, this queue treats the output register of the non-FWFT inner queue as + # an additional storage element. 
+ m.submodules.unbuffered = fifo = SyncFIFO(width=self.width, depth=self.depth - 1, + fwft=False) + + m.d.comb += [ + fifo.w_data.eq(self.w_data), + fifo.w_en.eq(self.w_en), + self.w_rdy.eq(fifo.w_rdy), + ] + + m.d.comb += [ + self.r_data.eq(fifo.r_data), + fifo.r_en.eq(fifo.r_rdy & (~self.r_rdy | self.r_en)), + ] + with m.If(fifo.r_en): + m.d.sync += self.r_rdy.eq(1) + with m.Elif(self.r_en): + m.d.sync += self.r_rdy.eq(0) + + m.d.comb += [ + self.level.eq(fifo.level + self.r_rdy), + self.w_level.eq(self.level), + self.r_level.eq(self.level), + ] + + return m + + +class AsyncFIFO(Elaboratable, FIFOInterface): + __doc__ = FIFOInterface._doc_template.format( + description=""" + Asynchronous first in, first out queue. + + Read and write interfaces are accessed from different clock domains, which can be set when + constructing the FIFO. + + :class:`AsyncFIFO` can be reset from the write clock domain. When the write domain reset is + asserted, the FIFO becomes empty. When the read domain is reset, data remains in the FIFO - the + read domain logic should correctly handle this case. + + :class:`AsyncFIFO` only supports power of 2 depths. Unless ``exact_depth`` is specified, + the ``depth`` parameter is rounded up to the next power of 2. + """.strip(), + parameters=""" + r_domain : str + Read clock domain. + w_domain : str + Write clock domain. + """.strip(), + attributes=""" + fwft : bool + Always set. + """.strip(), + r_data_valid="Valid if ``r_rdy`` is asserted.", + r_attributes=""" + r_rst : Signal, out + Asserted while the FIFO is being reset by the write-domain reset (for at least one + read-domain clock cycle). 
+ """.strip(), + w_attributes="") + + def __init__(self, *, width, depth, r_domain="read", w_domain="write", exact_depth=False): + if depth != 0: + try: + depth_bits = log2_int(depth, need_pow2=exact_depth) + depth = 1 << depth_bits + except ValueError: + raise ValueError("AsyncFIFO only supports depths that are powers of 2; requested " + "exact depth {} is not" + .format(depth)) from None + else: + depth_bits = 0 + super().__init__(width=width, depth=depth, fwft=True) + + self.r_rst = Signal() + self._r_domain = r_domain + self._w_domain = w_domain + self._ctr_bits = depth_bits + 1 + + def elaborate(self, platform): + m = Module() + if self.depth == 0: + m.d.comb += [ + self.w_rdy.eq(0), + self.r_rdy.eq(0), + ] + return m + + # The design of this queue is the "style #2" from Clifford E. Cummings' paper "Simulation + # and Synthesis Techniques for Asynchronous FIFO Design": + # http://www.sunburst-design.com/papers/CummingsSNUG2002SJ_FIFO1.pdf + + do_write = self.w_rdy & self.w_en + do_read = self.r_rdy & self.r_en + + # TODO: extract this pattern into lib.cdc.GrayCounter + produce_w_bin = Signal(self._ctr_bits) + produce_w_nxt = Signal(self._ctr_bits) + m.d.comb += produce_w_nxt.eq(produce_w_bin + do_write) + m.d[self._w_domain] += produce_w_bin.eq(produce_w_nxt) + + # Note: Both read-domain counters must be reset_less (see comments below) + consume_r_bin = Signal(self._ctr_bits, reset_less=True) + consume_r_nxt = Signal(self._ctr_bits) + m.d.comb += consume_r_nxt.eq(consume_r_bin + do_read) + m.d[self._r_domain] += consume_r_bin.eq(consume_r_nxt) + + produce_w_gry = Signal(self._ctr_bits) + produce_r_gry = Signal(self._ctr_bits) + produce_enc = m.submodules.produce_enc = \ + GrayEncoder(self._ctr_bits) + produce_cdc = m.submodules.produce_cdc = \ + FFSynchronizer(produce_w_gry, produce_r_gry, o_domain=self._r_domain) + m.d.comb += produce_enc.i.eq(produce_w_nxt), + m.d[self._w_domain] += produce_w_gry.eq(produce_enc.o) + + consume_r_gry = Signal(self._ctr_bits, 
reset_less=True) + consume_w_gry = Signal(self._ctr_bits) + consume_enc = m.submodules.consume_enc = \ + GrayEncoder(self._ctr_bits) + consume_cdc = m.submodules.consume_cdc = \ + FFSynchronizer(consume_r_gry, consume_w_gry, o_domain=self._w_domain) + m.d.comb += consume_enc.i.eq(consume_r_nxt) + m.d[self._r_domain] += consume_r_gry.eq(consume_enc.o) + + consume_w_bin = Signal(self._ctr_bits) + consume_dec = m.submodules.consume_dec = \ + GrayDecoder(self._ctr_bits) + m.d.comb += consume_dec.i.eq(consume_w_gry), + m.d[self._w_domain] += consume_w_bin.eq(consume_dec.o) + + produce_r_bin = Signal(self._ctr_bits) + produce_dec = m.submodules.produce_dec = \ + GrayDecoder(self._ctr_bits) + m.d.comb += produce_dec.i.eq(produce_r_gry), + m.d.comb += produce_r_bin.eq(produce_dec.o) + + w_full = Signal() + r_empty = Signal() + m.d.comb += [ + w_full.eq((produce_w_gry[-1] != consume_w_gry[-1]) & + (produce_w_gry[-2] != consume_w_gry[-2]) & + (produce_w_gry[:-2] == consume_w_gry[:-2])), + r_empty.eq(consume_r_gry == produce_r_gry), + ] + + m.d[self._w_domain] += self.w_level.eq((produce_w_bin - consume_w_bin)) + m.d.comb += self.r_level.eq((produce_r_bin - consume_r_bin)) + + storage = Memory(width=self.width, depth=self.depth) + w_port = m.submodules.w_port = storage.write_port(domain=self._w_domain) + r_port = m.submodules.r_port = storage.read_port (domain=self._r_domain, + transparent=False) + m.d.comb += [ + w_port.addr.eq(produce_w_bin[:-1]), + w_port.data.eq(self.w_data), + w_port.en.eq(do_write), + self.w_rdy.eq(~w_full), + ] + m.d.comb += [ + r_port.addr.eq(consume_r_nxt[:-1]), + self.r_data.eq(r_port.data), + r_port.en.eq(1), + self.r_rdy.eq(~r_empty), + ] + + # Reset handling to maintain FIFO and CDC invariants in the presence of a write-domain + # reset. + # There is a CDC hazard associated with resetting an async FIFO - Gray code counters which + # are reset to 0 violate their Gray code invariant. 
One way to handle this is to ensure + # that both sides of the FIFO are asynchronously reset by the same signal. We adopt a + # slight variation on this approach - reset control rests entirely with the write domain. + # The write domain's reset signal is used to asynchronously reset the read domain's + # counters and force the FIFO to be empty when the write domain's reset is asserted. + # This requires the two read domain counters to be marked as "reset_less", as they are + # reset through another mechanism. See https://github.com/amaranth-lang/amaranth/issues/181 + # for the full discussion. + w_rst = ResetSignal(domain=self._w_domain, allow_reset_less=True) + r_rst = Signal() + + # Async-set-sync-release synchronizer avoids CDC hazards + rst_cdc = m.submodules.rst_cdc = \ + AsyncFFSynchronizer(w_rst, r_rst, o_domain=self._r_domain) + + # Decode Gray code counter synchronized from write domain to overwrite binary + # counter in read domain. + rst_dec = m.submodules.rst_dec = \ + GrayDecoder(self._ctr_bits) + m.d.comb += rst_dec.i.eq(produce_r_gry) + with m.If(r_rst): + m.d.comb += r_empty.eq(1) + m.d[self._r_domain] += consume_r_gry.eq(produce_r_gry) + m.d[self._r_domain] += consume_r_bin.eq(rst_dec.o) + m.d[self._r_domain] += self.r_rst.eq(1) + with m.Else(): + m.d[self._r_domain] += self.r_rst.eq(0) + + if platform == "formal": + with m.If(Initial()): + m.d.comb += Assume(produce_w_gry == (produce_w_bin ^ produce_w_bin[1:])) + m.d.comb += Assume(consume_r_gry == (consume_r_bin ^ consume_r_bin[1:])) + + return m + + +class AsyncFIFOBuffered(Elaboratable, FIFOInterface): + __doc__ = FIFOInterface._doc_template.format( + description=""" + Buffered asynchronous first in, first out queue. + + Read and write interfaces are accessed from different clock domains, which can be set when + constructing the FIFO. + + :class:`AsyncFIFOBuffered` only supports power of 2 plus one depths. 
Unless ``exact_depth`` + is specified, the ``depth`` parameter is rounded up to the next power of 2 plus one. + (The output buffer acts as an additional queue element.) + + This queue's interface is identical to :class:`AsyncFIFO`, but it has an additional register + on the output, improving timing in case of block RAM that has large clock-to-output delay. + + In exchange, the latency between an entry being written to an empty queue and that entry + becoming available on the output is increased by one cycle compared to :class:`AsyncFIFO`. + """.strip(), + parameters=""" + r_domain : str + Read clock domain. + w_domain : str + Write clock domain. + """.strip(), + attributes=""" + fwft : bool + Always set. + """.strip(), + r_data_valid="Valid if ``r_rdy`` is asserted.", + r_attributes=""" + r_rst : Signal, out + Asserted while the FIFO is being reset by the write-domain reset (for at least one + read-domain clock cycle). + """.strip(), + w_attributes="") + + def __init__(self, *, width, depth, r_domain="read", w_domain="write", exact_depth=False): + if depth != 0: + try: + depth_bits = log2_int(max(0, depth - 1), need_pow2=exact_depth) + depth = (1 << depth_bits) + 1 + except ValueError: + raise ValueError("AsyncFIFOBuffered only supports depths that are one higher " + "than powers of 2; requested exact depth {} is not" + .format(depth)) from None + super().__init__(width=width, depth=depth, fwft=True) + + self.r_rst = Signal() + self._r_domain = r_domain + self._w_domain = w_domain + + def elaborate(self, platform): + m = Module() + if self.depth == 0: + m.d.comb += [ + self.w_rdy.eq(0), + self.r_rdy.eq(0), + ] + return m + + m.submodules.unbuffered = fifo = AsyncFIFO(width=self.width, depth=self.depth - 1, + r_domain=self._r_domain, w_domain=self._w_domain) + + m.d.comb += [ + fifo.w_data.eq(self.w_data), + self.w_rdy.eq(fifo.w_rdy), + fifo.w_en.eq(self.w_en), + ] + + r_consume_buffered = Signal() + m.d.comb += r_consume_buffered.eq((self.r_rdy - self.r_en) & 
self.r_rdy) + m.d[self._r_domain] += self.r_level.eq(fifo.r_level + r_consume_buffered) + + w_consume_buffered = Signal() + m.submodules.consume_buffered_cdc = FFSynchronizer(r_consume_buffered, w_consume_buffered, o_domain=self._w_domain, stages=4) + m.d.comb += self.w_level.eq(fifo.w_level + w_consume_buffered) + + with m.If(self.r_en | ~self.r_rdy): + m.d[self._r_domain] += [ + self.r_data.eq(fifo.r_data), + self.r_rdy.eq(fifo.r_rdy), + self.r_rst.eq(fifo.r_rst), + ] + m.d.comb += [ + fifo.r_en.eq(1) + ] + + return m diff --git a/amaranth/lib/io.py b/amaranth/lib/io.py new file mode 100644 index 0000000..9776eff --- /dev/null +++ b/amaranth/lib/io.py @@ -0,0 +1,116 @@ +from .. import * +from ..hdl.rec import * + + +__all__ = ["pin_layout", "Pin"] + + +def pin_layout(width, dir, xdr=0): + """ + Layout of the platform interface of a pin or several pins, which may be used inside + user-defined records. + + See :class:`Pin` for details. + """ + if not isinstance(width, int) or width < 1: + raise TypeError("Width must be a positive integer, not {!r}" + .format(width)) + if dir not in ("i", "o", "oe", "io"): + raise TypeError("Direction must be one of \"i\", \"o\", \"io\", or \"oe\", not {!r}""" + .format(dir)) + if not isinstance(xdr, int) or xdr < 0: + raise TypeError("Gearing ratio must be a non-negative integer, not {!r}" + .format(xdr)) + + fields = [] + if dir in ("i", "io"): + if xdr > 0: + fields.append(("i_clk", 1)) + if xdr > 2: + fields.append(("i_fclk", 1)) + if xdr in (0, 1): + fields.append(("i", width)) + else: + for n in range(xdr): + fields.append(("i{}".format(n), width)) + if dir in ("o", "oe", "io"): + if xdr > 0: + fields.append(("o_clk", 1)) + if xdr > 2: + fields.append(("o_fclk", 1)) + if xdr in (0, 1): + fields.append(("o", width)) + else: + for n in range(xdr): + fields.append(("o{}".format(n), width)) + if dir in ("oe", "io"): + fields.append(("oe", 1)) + return Layout(fields) + + +class Pin(Record): + """ + An interface to an I/O buffer or 
a group of them that provides uniform access to input, output, + or tristate buffers that may include a 1:n gearbox. (A 1:2 gearbox is typically called "DDR".) + + A :class:`Pin` is identical to a :class:`Record` that uses the corresponding :meth:`pin_layout` + except that it allows accessing the parameters like ``width`` as attributes. It is legal to use + a plain :class:`Record` anywhere a :class:`Pin` is used, provided that these attributes are + not necessary. + + Parameters + ---------- + width : int + Width of the ``i``/``iN`` and ``o``/``oN`` signals. + dir : ``"i"``, ``"o"``, ``"io"``, ``"oe"`` + Direction of the buffers. If ``"i"`` is specified, only the ``i``/``iN`` signals are + present. If ``"o"`` is specified, only the ``o``/``oN`` signals are present. If ``"oe"`` is + specified, the ``o``/``oN`` signals are present, and an ``oe`` signal is present. + If ``"io"`` is specified, both the ``i``/``iN`` and ``o``/``oN`` signals are present, and + an ``oe`` signal is present. + xdr : int + Gearbox ratio. If equal to 0, the I/O buffer is combinatorial, and only ``i``/``o`` + signals are present. If equal to 1, the I/O buffer is SDR, and only ``i``/``o`` signals are + present. If greater than 1, the I/O buffer includes a gearbox, and ``iN``/``oN`` signals + are present instead, where ``N in range(0, xdr)``. For example, if ``xdr=2``, the I/O buffer + is DDR; the signal ``i0`` reflects the value at the rising edge, and the signal ``i1`` + reflects the value at the falling edge. + name : str + Name of the underlying record. + + Attributes + ---------- + i_clk: + I/O buffer input clock. Synchronizes `i*`. Present if ``xdr`` is nonzero. + i_fclk: + I/O buffer input fast clock. Synchronizes `i*` on higher gearbox ratios. Present if ``xdr`` + is greater than 2. + i : Signal, out + I/O buffer input, without gearing. Present if ``dir="i"`` or ``dir="io"``, and ``xdr`` is + equal to 0 or 1. + i0, i1, ... : Signal, out + I/O buffer inputs, with gearing. 
Present if ``dir="i"`` or ``dir="io"``, and ``xdr`` is + greater than 1. + o_clk: + I/O buffer output clock. Synchronizes `o*`, including `oe`. Present if ``xdr`` is nonzero. + o_fclk: + I/O buffer output fast clock. Synchronizes `o*` on higher gearbox ratios. Present if + ``xdr`` is greater than 2. + o : Signal, in + I/O buffer output, without gearing. Present if ``dir="o"`` or ``dir="io"``, and ``xdr`` is + equal to 0 or 1. + o0, o1, ... : Signal, in + I/O buffer outputs, with gearing. Present if ``dir="o"`` or ``dir="io"``, and ``xdr`` is + greater than 1. + oe : Signal, in + I/O buffer output enable. Present if ``dir="io"`` or ``dir="oe"``. Buffers generally + cannot change direction more than once per cycle, so at most one output enable signal + is present. + """ + def __init__(self, width, dir, *, xdr=0, name=None, src_loc_at=0): + self.width = width + self.dir = dir + self.xdr = xdr + + super().__init__(pin_layout(self.width, self.dir, self.xdr), + name=name, src_loc_at=src_loc_at + 1) diff --git a/amaranth/lib/scheduler.py b/amaranth/lib/scheduler.py new file mode 100644 index 0000000..47bc38f --- /dev/null +++ b/amaranth/lib/scheduler.py @@ -0,0 +1,60 @@ +from .. import * + + +__all__ = ["RoundRobin"] + + +class RoundRobin(Elaboratable): + """Round-robin scheduler. + + For a given set of requests, the round-robin scheduler will + grant one request. Once it grants a request, if any other + requests are active, it grants the next active request with + a greater number, restarting from zero once it reaches the + highest one. + + Use :class:`EnableInserter` to control when the scheduler + is updated. + + Parameters + ---------- + count : int + Number of requests. + + Attributes + ---------- + requests : Signal(count), in + Set of requests. + grant : Signal(range(count)), out + Number of the granted request. Does not change if there are no + active requests. + valid : Signal(), out + Asserted if grant corresponds to an active request. 
Deasserted + otherwise, i.e. if no requests are active. + """ + def __init__(self, *, count): + if not isinstance(count, int) or count < 0: + raise ValueError("Count must be a non-negative integer, not {!r}" + .format(count)) + self.count = count + + self.requests = Signal(count) + self.grant = Signal(range(count)) + self.valid = Signal() + + def elaborate(self, platform): + m = Module() + + with m.Switch(self.grant): + for i in range(self.count): + with m.Case(i): + for pred in reversed(range(i)): + with m.If(self.requests[pred]): + m.d.sync += self.grant.eq(pred) + for succ in reversed(range(i + 1, self.count)): + with m.If(self.requests[succ]): + m.d.sync += self.grant.eq(succ) + + m.d.sync += self.valid.eq(self.requests.any()) + + return m diff --git a/amaranth/rpc.py b/amaranth/rpc.py new file mode 100644 index 0000000..450ce16 --- /dev/null +++ b/amaranth/rpc.py @@ -0,0 +1,111 @@ +import sys +import json +import argparse +import importlib + +from .hdl import Signal, Record, Elaboratable +from .back import rtlil + + +__all__ = ["main"] + + +def _collect_modules(names): + modules = {} + for name in names: + py_module_name, py_class_name = name.rsplit(".", 1) + py_module = importlib.import_module(py_module_name) + if py_class_name == "*": + for py_class_name in py_module.__all__: + py_class = py_module.__dict__[py_class_name] + if not issubclass(py_class, Elaboratable): + continue + modules["{}.{}".format(py_module_name, py_class_name)] = py_class + else: + py_class = py_module.__dict__[py_class_name] + if not isinstance(py_class, type) or not issubclass(py_class, Elaboratable): + raise TypeError("{}.{} is not a class inheriting from Elaboratable" + .format(py_module_name, py_class_name)) + modules[name] = py_class + return modules + + +def _serve_yosys(modules): + while True: + request_json = sys.stdin.readline() + if not request_json: break + request = json.loads(request_json) + + if request["method"] == "modules": + response = {"modules": 
list(modules.keys())} + + elif request["method"] == "derive": + module_name = request["module"] + + args, kwargs = [], {} + for parameter_name, parameter in request["parameters"].items(): + if parameter["type"] == "unsigned": + parameter_value = int(parameter["value"], 2) + elif parameter["type"] == "signed": + width = len(parameter["value"]) + parameter_value = int(parameter["value"], 2) + if parameter_value & (1 << (width - 1)): + parameter_value = -((1 << width) - parameter_value) + elif parameter["type"] == "string": + parameter_value = parameter["value"] + elif parameter["type"] == "real": + parameter_value = float(parameter["value"]) + else: + raise NotImplementedError("Unrecognized parameter type {}" + .format(parameter_name)) + if parameter_name.startswith("$"): + index = int(parameter_name[1:]) + while len(args) < index: + args.append(None) + args[index] = parameter_value + if parameter_name.startswith("\\"): + kwargs[parameter_name[1:]] = parameter_value + + try: + elaboratable = modules[module_name](*args, **kwargs) + ports = [] + # By convention, any public attribute that is a Signal or a Record is + # considered a port. + for port_name, port in vars(elaboratable).items(): + if not port_name.startswith("_") and isinstance(port, (Signal, Record)): + ports += port._lhs_signals() + rtlil_text = rtlil.convert(elaboratable, name=module_name, ports=ports) + response = {"frontend": "ilang", "source": rtlil_text} + except Exception as error: + response = {"error": "{}: {}".format(type(error).__name__, str(error))} + + else: + return {"error": "Unrecognized method {!r}".format(request["method"])} + + sys.stdout.write(json.dumps(response)) + sys.stdout.write("\n") + sys.stdout.flush() + + +def main(): + parser = argparse.ArgumentParser(description=r""" + The Amaranth RPC server allows a HDL synthesis program to request an Amaranth module to + be elaborated on demand using the parameters it provides. 
For example, using Yosys together + with the Amaranth RPC server allows instantiating parametric Amaranth modules directly + from Verilog. + """) + def add_modules_arg(parser): + parser.add_argument("modules", metavar="MODULE", type=str, nargs="+", + help="import and provide MODULES") + protocols = parser.add_subparsers(metavar="PROTOCOL", dest="protocol", required=True) + protocol_yosys = protocols.add_parser("yosys", help="use Yosys JSON-based RPC protocol") + add_modules_arg(protocol_yosys) + + args = parser.parse_args() + modules = _collect_modules(args.modules) + if args.protocol == "yosys": + _serve_yosys(modules) + + +if __name__ == "__main__": + main() diff --git a/amaranth/sim/__init__.py b/amaranth/sim/__init__.py new file mode 100644 index 0000000..c239c52 --- /dev/null +++ b/amaranth/sim/__init__.py @@ -0,0 +1,4 @@ +from .core import * + + +__all__ = ["Settle", "Delay", "Tick", "Passive", "Active", "Simulator"] diff --git a/nmigen/sim/_base.py b/amaranth/sim/_base.py similarity index 100% rename from nmigen/sim/_base.py rename to amaranth/sim/_base.py diff --git a/nmigen/sim/_pyclock.py b/amaranth/sim/_pyclock.py similarity index 100% rename from nmigen/sim/_pyclock.py rename to amaranth/sim/_pyclock.py diff --git a/nmigen/sim/_pycoro.py b/amaranth/sim/_pycoro.py similarity index 100% rename from nmigen/sim/_pycoro.py rename to amaranth/sim/_pycoro.py diff --git a/nmigen/sim/_pyrtl.py b/amaranth/sim/_pyrtl.py similarity index 99% rename from nmigen/sim/_pyrtl.py rename to amaranth/sim/_pyrtl.py index 13d515f..34be9d6 100644 --- a/nmigen/sim/_pyrtl.py +++ b/amaranth/sim/_pyrtl.py @@ -430,8 +430,8 @@ class _FragmentCompiler: # (almost certainly due to a bug in the code generator), use this environment variable # to make backtraces useful. 
code = emitter.flush() - if os.getenv("NMIGEN_pysim_dump"): - file = tempfile.NamedTemporaryFile("w", prefix="nmigen_pysim_", delete=False) + if os.getenv("AMARANTH_pysim_dump"): + file = tempfile.NamedTemporaryFile("w", prefix="amaranth_pysim_", delete=False) file.write(code) filename = file.name else: diff --git a/amaranth/sim/core.py b/amaranth/sim/core.py new file mode 100644 index 0000000..b829400 --- /dev/null +++ b/amaranth/sim/core.py @@ -0,0 +1,206 @@ +import inspect + +from .._utils import deprecated +from ..hdl.cd import * +from ..hdl.ir import * +from ._base import BaseEngine + + +__all__ = ["Settle", "Delay", "Tick", "Passive", "Active", "Simulator"] + + +class Command: + pass + + +class Settle(Command): + def __repr__(self): + return "(settle)" + + +class Delay(Command): + def __init__(self, interval=None): + self.interval = None if interval is None else float(interval) + + def __repr__(self): + if self.interval is None: + return "(delay ε)" + else: + return "(delay {:.3}us)".format(self.interval * 1e6) + + +class Tick(Command): + def __init__(self, domain="sync"): + if not isinstance(domain, (str, ClockDomain)): + raise TypeError("Domain must be a string or a ClockDomain instance, not {!r}" + .format(domain)) + assert domain != "comb" + self.domain = domain + + def __repr__(self): + return "(tick {})".format(self.domain) + + +class Passive(Command): + def __repr__(self): + return "(passive)" + + +class Active(Command): + def __repr__(self): + return "(active)" + + +class Simulator: + def __init__(self, fragment, *, engine="pysim"): + if isinstance(engine, type) and issubclass(engine, BaseEngine): + pass + elif engine == "pysim": + from .pysim import PySimEngine + engine = PySimEngine + else: + raise TypeError("Value '{!r}' is not a simulation engine class or " + "a simulation engine name" + .format(engine)) + + self._fragment = Fragment.get(fragment, platform=None).prepare() + self._engine = engine(self._fragment) + self._clocked = set() + + def 
_check_process(self, process): + if not (inspect.isgeneratorfunction(process) or inspect.iscoroutinefunction(process)): + raise TypeError("Cannot add a process {!r} because it is not a generator function" + .format(process)) + return process + + def add_process(self, process): + process = self._check_process(process) + def wrapper(): + # Only start a bench process after comb settling, so that the reset values are correct. + yield Settle() + yield from process() + self._engine.add_coroutine_process(wrapper, default_cmd=None) + + def add_sync_process(self, process, *, domain="sync"): + process = self._check_process(process) + def wrapper(): + # Only start a sync process after the first clock edge (or reset edge, if the domain + # uses an asynchronous reset). This matches the behavior of synchronous FFs. + yield Tick(domain) + yield from process() + self._engine.add_coroutine_process(wrapper, default_cmd=Tick(domain)) + + def add_clock(self, period, *, phase=None, domain="sync", if_exists=False): + """Add a clock process. + + Adds a process that drives the clock signal of ``domain`` at a 50% duty cycle. + + Arguments + --------- + period : float + Clock period. The process will toggle the ``domain`` clock signal every ``period / 2`` + seconds. + phase : None or float + Clock phase. The process will wait ``phase`` seconds before the first clock transition. + If not specified, defaults to ``period / 2``. + domain : str or ClockDomain + Driven clock domain. If specified as a string, the domain with that name is looked up + in the root fragment of the simulation. + if_exists : bool + If ``False`` (the default), raise an error if the driven domain is specified as + a string and the root fragment does not have such a domain. If ``True``, do nothing + in this case. 
+ """ + if isinstance(domain, ClockDomain): + pass + elif domain in self._fragment.domains: + domain = self._fragment.domains[domain] + elif if_exists: + return + else: + raise ValueError("Domain {!r} is not present in simulation" + .format(domain)) + if domain in self._clocked: + raise ValueError("Domain {!r} already has a clock driving it" + .format(domain.name)) + + if phase is None: + # By default, delay the first edge by half period. This causes any synchronous activity + # to happen at a non-zero time, distinguishing it from the reset values in the waveform + # viewer. + phase = period / 2 + self._engine.add_clock_process(domain.clk, phase=phase, period=period) + self._clocked.add(domain) + + def reset(self): + """Reset the simulation. + + Assign the reset value to every signal in the simulation, and restart every user process. + """ + self._engine.reset() + + # TODO(amaranth-0.4): replace with _real_step + @deprecated("instead of `sim.step()`, use `sim.advance()`") + def step(self): + return self.advance() + + def advance(self): + """Advance the simulation. + + Run every process and commit changes until a fixed point is reached, then advance time + to the closest deadline (if any). If there is an unstable combinatorial loop, + this function will never return. + + Returns ``True`` if there are any active processes, ``False`` otherwise. + """ + return self._engine.advance() + + def run(self): + """Run the simulation while any processes are active. + + Processes added with :meth:`add_process` and :meth:`add_sync_process` are initially active, + and may change their status using the ``yield Passive()`` and ``yield Active()`` commands. + Processes compiled from HDL and added with :meth:`add_clock` are always passive. + """ + while self.advance(): + pass + + def run_until(self, deadline, *, run_passive=False): + """Run the simulation until it advances to ``deadline``. 
+ + If ``run_passive`` is ``False``, the simulation also stops when there are no active + processes, similar to :meth:`run`. Otherwise, the simulation will stop only after it + advances to or past ``deadline``. + + If the simulation stops advancing, this function will never return. + """ + assert self._engine.now <= deadline + while (self.advance() or run_passive) and self._engine.now < deadline: + pass + + def write_vcd(self, vcd_file, gtkw_file=None, *, traces=()): + """Write waveforms to a Value Change Dump file, optionally populating a GTKWave save file. + + This method returns a context manager. It can be used as: :: + + sim = Simulator(frag) + sim.add_clock(1e-6) + with sim.write_vcd("dump.vcd", "dump.gtkw"): + sim.run_until(1e-3) + + Arguments + --------- + vcd_file : str or file-like object + Verilog Value Change Dump file or filename. + gtkw_file : str or file-like object + GTKWave save file or filename. + traces : iterable of Signal + Signals to display traces for. + """ + if self._engine.now != 0.0: + for file in (vcd_file, gtkw_file): + if hasattr(file, "close"): + file.close() + raise ValueError("Cannot start writing waveforms after advancing simulation time") + + return self._engine.write_vcd(vcd_file=vcd_file, gtkw_file=gtkw_file, traces=traces) diff --git a/amaranth/sim/pysim.py b/amaranth/sim/pysim.py new file mode 100644 index 0000000..94979db --- /dev/null +++ b/amaranth/sim/pysim.py @@ -0,0 +1,336 @@ +from contextlib import contextmanager +import itertools +from vcd import VCDWriter +from vcd.gtkw import GTKWSave + +from ..hdl import * +from ..hdl.ast import SignalDict +from ._base import * +from ._pyrtl import _FragmentCompiler +from ._pycoro import PyCoroProcess +from ._pyclock import PyClockProcess + + +__all__ = ["PySimEngine"] + + +class _NameExtractor: + def __init__(self): + self.names = SignalDict() + + def __call__(self, fragment, *, hierarchy=("top",)): + def add_signal_name(signal): + hierarchical_signal_name = (*hierarchy, 
signal.name) + if signal not in self.names: + self.names[signal] = {hierarchical_signal_name} + else: + self.names[signal].add(hierarchical_signal_name) + + for domain_name, domain_signals in fragment.drivers.items(): + if domain_name is not None: + domain = fragment.domains[domain_name] + add_signal_name(domain.clk) + if domain.rst is not None: + add_signal_name(domain.rst) + + for statement in fragment.statements: + for signal in statement._lhs_signals() | statement._rhs_signals(): + if not isinstance(signal, (ClockSignal, ResetSignal)): + add_signal_name(signal) + + for subfragment_index, (subfragment, subfragment_name) in enumerate(fragment.subfragments): + if subfragment_name is None: + subfragment_name = "U${}".format(subfragment_index) + self(subfragment, hierarchy=(*hierarchy, subfragment_name)) + + return self.names + + +class _VCDWriter: + @staticmethod + def timestamp_to_vcd(timestamp): + return timestamp * (10 ** 10) # 1/(100 ps) + + @staticmethod + def decode_to_vcd(signal, value): + return signal.decoder(value).expandtabs().replace(" ", "_") + + def __init__(self, fragment, *, vcd_file, gtkw_file=None, traces=()): + if isinstance(vcd_file, str): + vcd_file = open(vcd_file, "wt") + if isinstance(gtkw_file, str): + gtkw_file = open(gtkw_file, "wt") + + self.vcd_vars = SignalDict() + self.vcd_file = vcd_file + self.vcd_writer = vcd_file and VCDWriter(self.vcd_file, + timescale="100 ps", comment="Generated by Amaranth") + + self.gtkw_names = SignalDict() + self.gtkw_file = gtkw_file + self.gtkw_save = gtkw_file and GTKWSave(self.gtkw_file) + + self.traces = [] + + signal_names = _NameExtractor()(fragment) + + trace_names = SignalDict() + for trace in traces: + if trace not in signal_names: + trace_names[trace] = {("top", trace.name)} + self.traces.append(trace) + + if self.vcd_writer is None: + return + + for signal, names in itertools.chain(signal_names.items(), trace_names.items()): + if signal.decoder: + var_type = "string" + var_size = 1 + var_init = 
self.decode_to_vcd(signal, signal.reset) + else: + var_type = "wire" + var_size = signal.width + var_init = signal.reset + + for (*var_scope, var_name) in names: + suffix = None + while True: + try: + if suffix is None: + var_name_suffix = var_name + else: + var_name_suffix = "{}${}".format(var_name, suffix) + if signal not in self.vcd_vars: + vcd_var = self.vcd_writer.register_var( + scope=var_scope, name=var_name_suffix, + var_type=var_type, size=var_size, init=var_init) + self.vcd_vars[signal] = vcd_var + else: + self.vcd_writer.register_alias( + scope=var_scope, name=var_name_suffix, + var=self.vcd_vars[signal]) + break + except KeyError: + suffix = (suffix or 0) + 1 + + if signal not in self.gtkw_names: + self.gtkw_names[signal] = (*var_scope, var_name_suffix) + + def update(self, timestamp, signal, value): + vcd_var = self.vcd_vars.get(signal) + if vcd_var is None: + return + + vcd_timestamp = self.timestamp_to_vcd(timestamp) + if signal.decoder: + var_value = self.decode_to_vcd(signal, value) + else: + var_value = value + self.vcd_writer.change(vcd_var, vcd_timestamp, var_value) + + def close(self, timestamp): + if self.vcd_writer is not None: + self.vcd_writer.close(self.timestamp_to_vcd(timestamp)) + + if self.gtkw_save is not None: + self.gtkw_save.dumpfile(self.vcd_file.name) + self.gtkw_save.dumpfile_size(self.vcd_file.tell()) + + self.gtkw_save.treeopen("top") + for signal in self.traces: + if len(signal) > 1 and not signal.decoder: + suffix = "[{}:0]".format(len(signal) - 1) + else: + suffix = "" + self.gtkw_save.trace(".".join(self.gtkw_names[signal]) + suffix) + + if self.vcd_file is not None: + self.vcd_file.close() + if self.gtkw_file is not None: + self.gtkw_file.close() + + +class _Timeline: + def __init__(self): + self.now = 0.0 + self.deadlines = dict() + + def reset(self): + self.now = 0.0 + self.deadlines.clear() + + def at(self, run_at, process): + assert process not in self.deadlines + self.deadlines[process] = run_at + + def delay(self, 
delay_by, process): + if delay_by is None: + run_at = self.now + else: + run_at = self.now + delay_by + self.at(run_at, process) + + def advance(self): + nearest_processes = set() + nearest_deadline = None + for process, deadline in self.deadlines.items(): + if deadline is None: + if nearest_deadline is not None: + nearest_processes.clear() + nearest_processes.add(process) + nearest_deadline = self.now + break + elif nearest_deadline is None or deadline <= nearest_deadline: + assert deadline >= self.now + if nearest_deadline is not None and deadline < nearest_deadline: + nearest_processes.clear() + nearest_processes.add(process) + nearest_deadline = deadline + + if not nearest_processes: + return False + + for process in nearest_processes: + process.runnable = True + del self.deadlines[process] + self.now = nearest_deadline + + return True + + +class _PySignalState(BaseSignalState): + __slots__ = ("signal", "curr", "next", "waiters", "pending") + + def __init__(self, signal, pending): + self.signal = signal + self.pending = pending + self.waiters = dict() + self.curr = self.next = signal.reset + + def set(self, value): + if self.next == value: + return + self.next = value + self.pending.add(self) + + def commit(self): + if self.curr == self.next: + return False + self.curr = self.next + + awoken_any = False + for process, trigger in self.waiters.items(): + if trigger is None or trigger == self.curr: + process.runnable = awoken_any = True + return awoken_any + + +class _PySimulation(BaseSimulation): + def __init__(self): + self.timeline = _Timeline() + self.signals = SignalDict() + self.slots = [] + self.pending = set() + + def reset(self): + self.timeline.reset() + for signal, index in self.signals.items(): + self.slots[index].curr = self.slots[index].next = signal.reset + self.pending.clear() + + def get_signal(self, signal): + try: + return self.signals[signal] + except KeyError: + index = len(self.slots) + self.slots.append(_PySignalState(signal, self.pending)) 
+ self.signals[signal] = index + return index + + def add_trigger(self, process, signal, *, trigger=None): + index = self.get_signal(signal) + assert (process not in self.slots[index].waiters or + self.slots[index].waiters[process] == trigger) + self.slots[index].waiters[process] = trigger + + def remove_trigger(self, process, signal): + index = self.get_signal(signal) + assert process in self.slots[index].waiters + del self.slots[index].waiters[process] + + def wait_interval(self, process, interval): + self.timeline.delay(interval, process) + + def commit(self, changed=None): + converged = True + for signal_state in self.pending: + if signal_state.commit(): + converged = False + if changed is not None: + changed.update(self.pending) + self.pending.clear() + return converged + + +class PySimEngine(BaseEngine): + def __init__(self, fragment): + self._state = _PySimulation() + self._timeline = self._state.timeline + + self._fragment = fragment + self._processes = _FragmentCompiler(self._state)(self._fragment) + self._vcd_writers = [] + + def add_coroutine_process(self, process, *, default_cmd): + self._processes.add(PyCoroProcess(self._state, self._fragment.domains, process, + default_cmd=default_cmd)) + + def add_clock_process(self, clock, *, phase, period): + self._processes.add(PyClockProcess(self._state, clock, + phase=phase, period=period)) + + def reset(self): + self._state.reset() + for process in self._processes: + process.reset() + + def _step(self): + changed = set() if self._vcd_writers else None + + # Performs the two phases of a delta cycle in a loop: + converged = False + while not converged: + # 1. eval: run and suspend every non-waiting process once, queueing signal changes + for process in self._processes: + if process.runnable: + process.runnable = False + process.run() + + # 2. 
commit: apply every queued signal change, waking up any waiting processes + converged = self._state.commit(changed) + + for vcd_writer in self._vcd_writers: + for signal_state in changed: + vcd_writer.update(self._timeline.now, + signal_state.signal, signal_state.curr) + + def advance(self): + self._step() + self._timeline.advance() + return any(not process.passive for process in self._processes) + + @property + def now(self): + return self._timeline.now + + @contextmanager + def write_vcd(self, *, vcd_file, gtkw_file, traces): + vcd_writer = _VCDWriter(self._fragment, + vcd_file=vcd_file, gtkw_file=gtkw_file, traces=traces) + try: + self._vcd_writers.append(vcd_writer) + yield + finally: + vcd_writer.close(self._timeline.now) + self._vcd_writers.remove(vcd_writer) diff --git a/amaranth/test/__init__.py b/amaranth/test/__init__.py new file mode 100644 index 0000000..c09c28b --- /dev/null +++ b/amaranth/test/__init__.py @@ -0,0 +1 @@ +# TODO(amaranth-0.4): remove the entire package diff --git a/amaranth/test/utils.py b/amaranth/test/utils.py new file mode 100644 index 0000000..30661b1 --- /dev/null +++ b/amaranth/test/utils.py @@ -0,0 +1,84 @@ +import os +import re +import shutil +import subprocess +import textwrap +import traceback +import unittest +import warnings + +from ..hdl.ast import * +from ..hdl.ir import * +from ..back import rtlil +from .._toolchain import require_tool + + +warnings.warn("amaranth.test.utils is an internal utility module that has several design flaws " + "and was never intended as a public API; it will be removed in amaranth 0.4. " + "if you are using FHDLTestCase, include its implementation in your codebase. 
" + "see also amaranth-lang/amaranth#487", + DeprecationWarning, stacklevel=2) + + +__all__ = ["FHDLTestCase"] + + +class FHDLTestCase(unittest.TestCase): + def assertRepr(self, obj, repr_str): + if isinstance(obj, list): + obj = Statement.cast(obj) + def prepare_repr(repr_str): + repr_str = re.sub(r"\s+", " ", repr_str) + repr_str = re.sub(r"\( (?=\()", "(", repr_str) + repr_str = re.sub(r"\) (?=\))", ")", repr_str) + return repr_str.strip() + self.assertEqual(prepare_repr(repr(obj)), prepare_repr(repr_str)) + + def assertFormal(self, spec, mode="bmc", depth=1): + caller, *_ = traceback.extract_stack(limit=2) + spec_root, _ = os.path.splitext(caller.filename) + spec_dir = os.path.dirname(spec_root) + spec_name = "{}_{}".format( + os.path.basename(spec_root).replace("test_", "spec_"), + caller.name.replace("test_", "") + ) + + # The sby -f switch seems not fully functional when sby is reading from stdin. + if os.path.exists(os.path.join(spec_dir, spec_name)): + shutil.rmtree(os.path.join(spec_dir, spec_name)) + + if mode == "hybrid": + # A mix of BMC and k-induction, as per personal communication with Claire Wolf. 
+ script = "setattr -unset init w:* a:amaranth.sample_reg %d" + mode = "bmc" + else: + script = "" + + config = textwrap.dedent("""\ + [options] + mode {mode} + depth {depth} + wait on + + [engines] + smtbmc + + [script] + read_ilang top.il + prep + {script} + + [file top.il] + {rtlil} + """).format( + mode=mode, + depth=depth, + script=script, + rtlil=rtlil.convert(Fragment.get(spec, platform="formal")) + ) + with subprocess.Popen([require_tool("sby"), "-f", "-d", spec_name], cwd=spec_dir, + universal_newlines=True, + stdin=subprocess.PIPE, stdout=subprocess.PIPE) as proc: + stdout, stderr = proc.communicate(config) + if proc.returncode != 0: + self.fail("Formal verification failed:\n" + stdout) diff --git a/amaranth/tracer.py b/amaranth/tracer.py new file mode 100644 index 0000000..17795f3 --- /dev/null +++ b/amaranth/tracer.py @@ -0,0 +1,55 @@ +import sys +from opcode import opname + + +__all__ = ["NameNotFound", "get_var_name", "get_src_loc"] + + +class NameNotFound(Exception): + pass + + +_raise_exception = object() + + +def get_var_name(depth=2, default=_raise_exception): + frame = sys._getframe(depth) + code = frame.f_code + call_index = frame.f_lasti + while True: + call_opc = opname[code.co_code[call_index]] + if call_opc in ("EXTENDED_ARG",): + call_index += 2 + else: + break + if call_opc not in ("CALL_FUNCTION", "CALL_FUNCTION_KW", "CALL_FUNCTION_EX", "CALL_METHOD"): + return None + + index = call_index + 2 + while True: + opc = opname[code.co_code[index]] + if opc in ("STORE_NAME", "STORE_ATTR"): + name_index = int(code.co_code[index + 1]) + return code.co_names[name_index] + elif opc == "STORE_FAST": + name_index = int(code.co_code[index + 1]) + return code.co_varnames[name_index] + elif opc == "STORE_DEREF": + name_index = int(code.co_code[index + 1]) + return code.co_cellvars[name_index] + elif opc in ("LOAD_GLOBAL", "LOAD_NAME", "LOAD_ATTR", "LOAD_FAST", "LOAD_DEREF", + "DUP_TOP", "BUILD_LIST"): + index += 2 + else: + if default is 
_raise_exception: + raise NameNotFound + else: + return default + + +def get_src_loc(src_loc_at=0): + # n-th frame: get_src_loc() + # n-1th frame: caller of get_src_loc() (usually constructor) + # n-2th frame: caller of caller (usually user code) + frame = sys._getframe(2 + src_loc_at) + return (frame.f_code.co_filename, frame.f_lineno) diff --git a/amaranth/utils.py b/amaranth/utils.py new file mode 100644 index 0000000..227258a --- /dev/null +++ b/amaranth/utils.py @@ -0,0 +1,21 @@ +__all__ = ["log2_int", "bits_for"] + + +def log2_int(n, need_pow2=True): + if n == 0: + return 0 + r = (n - 1).bit_length() + if need_pow2 and (1 << r) != n: + raise ValueError("{} is not a power of 2".format(n)) + return r + + +def bits_for(n, require_sign_bit=False): + if n > 0: + r = log2_int(n + 1, False) + else: + require_sign_bit = True + r = log2_int(-n, False) + if require_sign_bit: + r += 1 + return r diff --git a/amaranth/vendor/__init__.py b/amaranth/vendor/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/amaranth/vendor/intel.py b/amaranth/vendor/intel.py new file mode 100644 index 0000000..23ee18d --- /dev/null +++ b/amaranth/vendor/intel.py @@ -0,0 +1,571 @@ +from abc import abstractproperty + +from ..hdl import * +from ..build import * + + +__all__ = ["IntelPlatform"] + + +class IntelPlatform(TemplatedPlatform): + """ + Quartus toolchain + ----------------- + + Required tools: + * ``quartus_map`` + * ``quartus_fit`` + * ``quartus_asm`` + * ``quartus_sta`` + + The environment is populated by running the script specified in the environment variable + ``AMARANTH_ENV_Quartus``, if present. + + Available overrides: + * ``add_settings``: inserts commands at the end of the QSF file. + * ``add_constraints``: inserts commands at the end of the SDC file. + * ``nproc``: sets the number of cores used by all tools. + * ``quartus_map_opts``: adds extra options for ``quartus_map``. + * ``quartus_fit_opts``: adds extra options for ``quartus_fit``. 
+ * ``quartus_asm_opts``: adds extra options for ``quartus_asm``. + * ``quartus_sta_opts``: adds extra options for ``quartus_sta``. + + Build products: + * ``*.rpt``: toolchain reports. + * ``{{name}}.sof``: bitstream as SRAM object file. + * ``{{name}}.rbf``: bitstream as raw binary file. + + + Mistral toolchain + ----------------- + + Required tools: + * ``yosys`` + * ``nextpnr-mistral`` + + The environment is populated by running the script specified in the environment variable + ``AMARANTH_ENV_Mistral``, if present. + + * ``verbose``: enables logging of informational messages to standard error. + * ``read_verilog_opts``: adds options for ``read_verilog`` Yosys command. + * ``synth_opts``: adds options for ``synth_intel_alm`` Yosys command. + * ``script_after_read``: inserts commands after ``read_ilang`` in Yosys script. + * ``script_after_synth``: inserts commands after ``synth_intel_alm`` in Yosys script. + * ``yosys_opts``: adds extra options for ``yosys``. + * ``nextpnr_opts``: adds extra options for ``nextpnr-mistral``. 
+ """ + + toolchain = None # selected when creating platform + + device = abstractproperty() + package = abstractproperty() + speed = abstractproperty() + suffix = "" + + # Quartus templates + + quartus_suppressed_warnings = [ + 10264, # All case item expressions in this case statement are onehot + 10270, # Incomplete Verilog case statement has no default case item + 10335, # Unrecognized synthesis attribute + 10763, # Verilog case statement has overlapping case item expressions with non-constant or don't care bits + 10935, # Verilog casex/casez overlaps with a previous casex/vasez item expression + 12125, # Using design file which is not specified as a design file for the current project, but contains definitions used in project + 18236, # Number of processors not specified in QSF + 292013, # Feature is only available with a valid subscription license + ] + + quartus_required_tools = [ + "quartus_map", + "quartus_fit", + "quartus_asm", + "quartus_sta", + ] + + quartus_file_templates = { + **TemplatedPlatform.build_script_templates, + "build_{{name}}.sh": r""" + # {{autogenerated}} + if [ -n "${{platform._toolchain_env_var}}" ]; then + QUARTUS_ROOTDIR=$(dirname $(dirname "${{platform._toolchain_env_var}}")) + # Quartus' qenv.sh does not work with `set -e`. + . 
"${{platform._toolchain_env_var}}" + fi + set -e{{verbose("x")}} + {{emit_commands("sh")}} + """, + "{{name}}.v": r""" + /* {{autogenerated}} */ + {{emit_verilog()}} + """, + "{{name}}.debug.v": r""" + /* {{autogenerated}} */ + {{emit_debug_verilog()}} + """, + "{{name}}.qsf": r""" + # {{autogenerated}} + {% if get_override("nproc") -%} + set_global_assignment -name NUM_PARALLEL_PROCESSORS {{get_override("nproc")}} + {% endif %} + + {% for file in platform.iter_files(".v") -%} + set_global_assignment -name VERILOG_FILE {{file|tcl_quote}} + {% endfor %} + {% for file in platform.iter_files(".sv") -%} + set_global_assignment -name SYSTEMVERILOG_FILE {{file|tcl_quote}} + {% endfor %} + {% for file in platform.iter_files(".vhd", ".vhdl") -%} + set_global_assignment -name VHDL_FILE {{file|tcl_quote}} + {% endfor %} + set_global_assignment -name VERILOG_FILE {{name}}.v + set_global_assignment -name TOP_LEVEL_ENTITY {{name}} + + set_global_assignment -name DEVICE {{platform.device}}{{platform.package}}{{platform.speed}}{{platform.suffix}} + {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} + set_location_assignment -to {{port_name|tcl_quote}} PIN_{{pin_name}} + {% for key, value in attrs.items() -%} + set_instance_assignment -to {{port_name|tcl_quote}} -name {{key}} {{value|tcl_quote}} + {% endfor %} + {% endfor %} + + set_global_assignment -name GENERATE_RBF_FILE ON + + {{get_override("add_settings")|default("# (add_settings placeholder)")}} + """, + "{{name}}.sdc": r""" + {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} + {% if port_signal is not none -%} + create_clock -name {{port_signal.name|tcl_quote}} -period {{1000000000/frequency}} [get_ports {{port_signal.name|tcl_quote}}] + {% else -%} + create_clock -name {{net_signal.name|tcl_quote}} -period {{1000000000/frequency}} [get_nets {{net_signal|hierarchy("|")|tcl_quote}}] + {% endif %} + {% endfor %} + {{get_override("add_constraints")|default("# 
(add_constraints placeholder)")}} + """, + "{{name}}.srf": r""" + {% for warning in platform.quartus_suppressed_warnings %} + { "" "" "" "{{name}}.v" { } { } 0 {{warning}} "" 0 0 "Design Software" 0 -1 0 ""} + {% endfor %} + """, + } + quartus_command_templates = [ + r""" + {{invoke_tool("quartus_map")}} + {{get_override("quartus_map_opts")|options}} + --rev={{name}} {{name}} + """, + r""" + {{invoke_tool("quartus_fit")}} + {{get_override("quartus_fit_opts")|options}} + --rev={{name}} {{name}} + """, + r""" + {{invoke_tool("quartus_asm")}} + {{get_override("quartus_asm_opts")|options}} + --rev={{name}} {{name}} + """, + r""" + {{invoke_tool("quartus_sta")}} + {{get_override("quartus_sta_opts")|options}} + --rev={{name}} {{name}} + """, + ] + + + # Mistral templates + + mistral_required_tools = [ + "yosys", + "nextpnr-mistral" + ] + mistral_file_templates = { + **TemplatedPlatform.build_script_templates, + "{{name}}.il": r""" + # {{autogenerated}} + {{emit_rtlil()}} + """, + "{{name}}.debug.v": r""" + /* {{autogenerated}} */ + {{emit_debug_verilog()}} + """, + "{{name}}.ys": r""" + # {{autogenerated}} + {% for file in platform.iter_files(".v") -%} + read_verilog {{get_override("read_verilog_opts")|options}} {{file}} + {% endfor %} + {% for file in platform.iter_files(".sv") -%} + read_verilog -sv {{get_override("read_verilog_opts")|options}} {{file}} + {% endfor %} + {% for file in platform.iter_files(".il") -%} + read_ilang {{file}} + {% endfor %} + read_ilang {{name}}.il + delete w:$verilog_initial_trigger + {{get_override("script_after_read")|default("# (script_after_read placeholder)")}} + synth_intel_alm {{get_override("synth_opts")|options}} -top {{name}} + {{get_override("script_after_synth")|default("# (script_after_synth placeholder)")}} + write_json {{name}}.json + """, + "{{name}}.qsf": r""" + # {{autogenerated}} + {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} + set_location_assignment -to {{port_name|tcl_quote}} 
PIN_{{pin_name}} + {% for key, value in attrs.items() -%} + set_instance_assignment -to {{port_name|tcl_quote}} -name {{key}} {{value|tcl_quote}} + {% endfor %} + {% endfor %} + """, + + } + mistral_command_templates = [ + r""" + {{invoke_tool("yosys")}} + {{quiet("-q")}} + {{get_override("yosys_opts")|options}} + -l {{name}}.rpt + {{name}}.ys + """, + r""" + {{invoke_tool("nextpnr-mistral")}} + {{quiet("--quiet")}} + {{get_override("nextpnr_opts")|options}} + --log {{name}}.tim + --device {{platform.device}}{{platform.package}}{{platform.speed}}{{platform.suffix}} + --json {{name}}.json + --qsf {{name}}.qsf + --rbf {{name}}.rbf + """ + ] + + # Common logic + + def __init__(self, *, toolchain="Quartus"): + super().__init__() + + assert toolchain in ("Quartus", "Mistral") + self.toolchain = toolchain + + @property + def required_tools(self): + if self.toolchain == "Quartus": + return self.quartus_required_tools + if self.toolchain == "Mistral": + return self.mistral_required_tools + assert False + + @property + def file_templates(self): + if self.toolchain == "Quartus": + return self.quartus_file_templates + if self.toolchain == "Mistral": + return self.mistral_file_templates + assert False + + @property + def command_templates(self): + if self.toolchain == "Quartus": + return self.quartus_command_templates + if self.toolchain == "Mistral": + return self.mistral_command_templates + assert False + + def add_clock_constraint(self, clock, frequency): + super().add_clock_constraint(clock, frequency) + clock.attrs["keep"] = "true" + + @property + def default_clk_constraint(self): + # Internal high-speed oscillator on Cyclone V devices. + # It is specified to not be faster than 100MHz, but the actual + # frequency seems to vary a lot between devices. Measurements + # of 78 to 84 MHz have been observed. + if self.default_clk == "cyclonev_oscillator": + assert self.device.startswith("5C") + return Clock(100e6) + # Otherwise, use the defined Clock resource. 
+ return super().default_clk_constraint + + def create_missing_domain(self, name): + if name == "sync" and self.default_clk == "cyclonev_oscillator": + # Use the internal high-speed oscillator for Cyclone V devices + assert self.device.startswith("5C") + m = Module() + m.domains += ClockDomain("sync") + m.submodules += Instance("cyclonev_oscillator", + i_oscena=Const(1), + o_clkout=ClockSignal("sync")) + return m + else: + return super().create_missing_domain(name) + + # The altiobuf_* and altddio_* primitives are explained in the following Intel documents: + # * https://www.intel.com/content/dam/www/programmable/us/en/pdfs/literature/ug/ug_altiobuf.pdf + # * https://www.intel.com/content/dam/www/programmable/us/en/pdfs/literature/ug/ug_altddio.pdf + # See also errata mentioned in: https://www.intel.com/content/www/us/en/programmable/support/support-resources/knowledge-base/solutions/rd11192012_735.html. + + @staticmethod + def _get_ireg(m, pin, invert): + def get_ineg(i): + if invert: + i_neg = Signal.like(i, name_suffix="_neg") + m.d.comb += i.eq(~i_neg) + return i_neg + else: + return i + + if pin.xdr == 0: + return get_ineg(pin.i) + elif pin.xdr == 1: + i_sdr = Signal(pin.width, name="{}_i_sdr") + m.submodules += Instance("$dff", + p_CLK_POLARITY=1, + p_WIDTH=pin.width, + i_CLK=pin.i_clk, + i_D=i_sdr, + o_Q=get_ineg(pin.i), + ) + return i_sdr + elif pin.xdr == 2: + i_ddr = Signal(pin.width, name="{}_i_ddr".format(pin.name)) + m.submodules["{}_i_ddr".format(pin.name)] = Instance("altddio_in", + p_width=pin.width, + i_datain=i_ddr, + i_inclock=pin.i_clk, + o_dataout_h=get_ineg(pin.i0), + o_dataout_l=get_ineg(pin.i1), + ) + return i_ddr + assert False + + @staticmethod + def _get_oreg(m, pin, invert): + def get_oneg(o): + if invert: + o_neg = Signal.like(o, name_suffix="_neg") + m.d.comb += o_neg.eq(~o) + return o_neg + else: + return o + + if pin.xdr == 0: + return get_oneg(pin.o) + elif pin.xdr == 1: + o_sdr = Signal(pin.width, name="{}_o_sdr".format(pin.name)) 
+ m.submodules += Instance("$dff", + p_CLK_POLARITY=1, + p_WIDTH=pin.width, + i_CLK=pin.o_clk, + i_D=get_oneg(pin.o), + o_Q=o_sdr, + ) + return o_sdr + elif pin.xdr == 2: + o_ddr = Signal(pin.width, name="{}_o_ddr".format(pin.name)) + m.submodules["{}_o_ddr".format(pin.name)] = Instance("altddio_out", + p_width=pin.width, + o_dataout=o_ddr, + i_outclock=pin.o_clk, + i_datain_h=get_oneg(pin.o0), + i_datain_l=get_oneg(pin.o1), + ) + return o_ddr + assert False + + @staticmethod + def _get_oereg(m, pin): + # altiobuf_ requires an output enable signal for each pin, but pin.oe is 1 bit wide. + if pin.xdr == 0: + return Repl(pin.oe, pin.width) + elif pin.xdr in (1, 2): + oe_reg = Signal(pin.width, name="{}_oe_reg".format(pin.name)) + oe_reg.attrs["useioff"] = "1" + m.submodules += Instance("$dff", + p_CLK_POLARITY=1, + p_WIDTH=pin.width, + i_CLK=pin.o_clk, + i_D=pin.oe, + o_Q=oe_reg, + ) + return oe_reg + assert False + + def get_input(self, pin, port, attrs, invert): + self._check_feature("single-ended input", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + if pin.xdr == 1: + port.attrs["useioff"] = 1 + + m = Module() + m.submodules[pin.name] = Instance("altiobuf_in", + p_enable_bus_hold="FALSE", + p_number_of_channels=pin.width, + p_use_differential_mode="FALSE", + i_datain=port.io, + o_dataout=self._get_ireg(m, pin, invert) + ) + return m + + def get_output(self, pin, port, attrs, invert): + self._check_feature("single-ended output", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + if pin.xdr == 1: + port.attrs["useioff"] = 1 + + m = Module() + m.submodules[pin.name] = Instance("altiobuf_out", + p_enable_bus_hold="FALSE", + p_number_of_channels=pin.width, + p_use_differential_mode="FALSE", + p_use_oe="FALSE", + i_datain=self._get_oreg(m, pin, invert), + o_dataout=port.io, + ) + return m + + def get_tristate(self, pin, port, attrs, invert): + self._check_feature("single-ended tristate", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + if pin.xdr 
== 1: + port.attrs["useioff"] = 1 + + m = Module() + m.submodules[pin.name] = Instance("altiobuf_out", + p_enable_bus_hold="FALSE", + p_number_of_channels=pin.width, + p_use_differential_mode="FALSE", + p_use_oe="TRUE", + i_datain=self._get_oreg(m, pin, invert), + o_dataout=port.io, + i_oe=self._get_oereg(m, pin) + ) + return m + + def get_input_output(self, pin, port, attrs, invert): + self._check_feature("single-ended input/output", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + if pin.xdr == 1: + port.attrs["useioff"] = 1 + + m = Module() + m.submodules[pin.name] = Instance("altiobuf_bidir", + p_enable_bus_hold="FALSE", + p_number_of_channels=pin.width, + p_use_differential_mode="FALSE", + i_datain=self._get_oreg(m, pin, invert), + io_dataio=port.io, + o_dataout=self._get_ireg(m, pin, invert), + i_oe=self._get_oereg(m, pin), + ) + return m + + def get_diff_input(self, pin, port, attrs, invert): + self._check_feature("differential input", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + if pin.xdr == 1: + port.p.attrs["useioff"] = 1 + port.n.attrs["useioff"] = 1 + + m = Module() + m.submodules[pin.name] = Instance("altiobuf_in", + p_enable_bus_hold="FALSE", + p_number_of_channels=pin.width, + p_use_differential_mode="TRUE", + i_datain=port.p, + i_datain_b=port.n, + o_dataout=self._get_ireg(m, pin, invert) + ) + return m + + def get_diff_output(self, pin, port, attrs, invert): + self._check_feature("differential output", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + if pin.xdr == 1: + port.p.attrs["useioff"] = 1 + port.n.attrs["useioff"] = 1 + + m = Module() + m.submodules[pin.name] = Instance("altiobuf_out", + p_enable_bus_hold="FALSE", + p_number_of_channels=pin.width, + p_use_differential_mode="TRUE", + p_use_oe="FALSE", + i_datain=self._get_oreg(m, pin, invert), + o_dataout=port.p, + o_dataout_b=port.n, + ) + return m + + def get_diff_tristate(self, pin, port, attrs, invert): + self._check_feature("differential tristate", pin, attrs, 
+ valid_xdrs=(0, 1, 2), valid_attrs=True) + if pin.xdr == 1: + port.p.attrs["useioff"] = 1 + port.n.attrs["useioff"] = 1 + + m = Module() + m.submodules[pin.name] = Instance("altiobuf_out", + p_enable_bus_hold="FALSE", + p_number_of_channels=pin.width, + p_use_differential_mode="TRUE", + p_use_oe="TRUE", + i_datain=self._get_oreg(m, pin, invert), + o_dataout=port.p, + o_dataout_b=port.n, + i_oe=self._get_oereg(m, pin), + ) + return m + + def get_diff_input_output(self, pin, port, attrs, invert): + self._check_feature("differential input/output", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + if pin.xdr == 1: + port.p.attrs["useioff"] = 1 + port.n.attrs["useioff"] = 1 + + m = Module() + m.submodules[pin.name] = Instance("altiobuf_bidir", + p_enable_bus_hold="FALSE", + p_number_of_channels=pin.width, + p_use_differential_mode="TRUE", + i_datain=self._get_oreg(m, pin, invert), + io_dataio=port.p, + io_dataio_b=port.n, + o_dataout=self._get_ireg(m, pin, invert), + i_oe=self._get_oereg(m, pin), + ) + return m + + # The altera_std_synchronizer{,_bundle} megafunctions embed SDC constraints that mark false + # paths, so use them instead of our default implementation. 
+ + def get_ff_sync(self, ff_sync): + return Instance("altera_std_synchronizer_bundle", + p_width=len(ff_sync.i), + p_depth=ff_sync._stages, + i_clk=ClockSignal(ff_sync._o_domain), + i_reset_n=Const(1), + i_din=ff_sync.i, + o_dout=ff_sync.o, + ) + + def get_async_ff_sync(self, async_ff_sync): + m = Module() + sync_output = Signal() + if async_ff_sync._edge == "pos": + m.submodules += Instance("altera_std_synchronizer", + p_depth=async_ff_sync._stages, + i_clk=ClockSignal(async_ff_sync._o_domain), + i_reset_n=~async_ff_sync.i, + i_din=Const(1), + o_dout=sync_output, + ) + else: + m.submodules += Instance("altera_std_synchronizer", + p_depth=async_ff_sync._stages, + i_clk=ClockSignal(async_ff_sync._o_domain), + i_reset_n=async_ff_sync.i, + i_din=Const(1), + o_dout=sync_output, + ) + m.d.comb += async_ff_sync.o.eq(~sync_output) + return m diff --git a/amaranth/vendor/lattice_ecp5.py b/amaranth/vendor/lattice_ecp5.py new file mode 100644 index 0000000..ec5cc73 --- /dev/null +++ b/amaranth/vendor/lattice_ecp5.py @@ -0,0 +1,666 @@ +from abc import abstractproperty + +from ..hdl import * +from ..build import * + + +__all__ = ["LatticeECP5Platform"] + + +class LatticeECP5Platform(TemplatedPlatform): + """ + Trellis toolchain + ----------------- + + Required tools: + * ``yosys`` + * ``nextpnr-ecp5`` + * ``ecppack`` + + The environment is populated by running the script specified in the environment variable + ``AMARANTH_ENV_Trellis``, if present. + + Available overrides: + * ``verbose``: enables logging of informational messages to standard error. + * ``read_verilog_opts``: adds options for ``read_verilog`` Yosys command. + * ``synth_opts``: adds options for ``synth_ecp5`` Yosys command. + * ``script_after_read``: inserts commands after ``read_ilang`` in Yosys script. + * ``script_after_synth``: inserts commands after ``synth_ecp5`` in Yosys script. + * ``yosys_opts``: adds extra options for ``yosys``. + * ``nextpnr_opts``: adds extra options for ``nextpnr-ecp5``. 
+ * ``ecppack_opts``: adds extra options for ``ecppack``. + * ``add_preferences``: inserts commands at the end of the LPF file. + + Build products: + * ``{{name}}.rpt``: Yosys log. + * ``{{name}}.json``: synthesized RTL. + * ``{{name}}.tim``: nextpnr log. + * ``{{name}}.config``: ASCII bitstream. + * ``{{name}}.bit``: binary bitstream. + * ``{{name}}.svf``: JTAG programming vector. + + Diamond toolchain + ----------------- + + Required tools: + * ``pnmainc`` + * ``ddtcmd`` + + The environment is populated by running the script specified in the environment variable + ``AMARANTH_ENV_Diamond``, if present. On Linux, diamond_env as provided by Diamond + itself is a good candidate. On Windows, the following script (named ``diamond_env.bat``, + for instance) is known to work:: + + @echo off + set PATH=C:\\lscc\\diamond\\%DIAMOND_VERSION%\\bin\\nt64;%PATH% + + Available overrides: + * ``script_project``: inserts commands before ``prj_project save`` in Tcl script. + * ``script_after_export``: inserts commands after ``prj_run Export`` in Tcl script. + * ``add_preferences``: inserts commands at the end of the LPF file. + * ``add_constraints``: inserts commands at the end of the XDC file. + + Build products: + * ``{{name}}_impl/{{name}}_impl.htm``: consolidated log. + * ``{{name}}.bit``: binary bitstream. + * ``{{name}}.svf``: JTAG programming vector. 
+ """ + + toolchain = None # selected when creating platform + + device = abstractproperty() + package = abstractproperty() + speed = abstractproperty() + grade = "C" # [C]ommercial, [I]ndustrial + + # Trellis templates + + _nextpnr_device_options = { + "LFE5U-12F": "--12k", + "LFE5U-25F": "--25k", + "LFE5U-45F": "--45k", + "LFE5U-85F": "--85k", + "LFE5UM-25F": "--um-25k", + "LFE5UM-45F": "--um-45k", + "LFE5UM-85F": "--um-85k", + "LFE5UM5G-25F": "--um5g-25k", + "LFE5UM5G-45F": "--um5g-45k", + "LFE5UM5G-85F": "--um5g-85k", + } + _nextpnr_package_options = { + "BG256": "caBGA256", + "MG285": "csfBGA285", + "BG381": "caBGA381", + "BG554": "caBGA554", + "BG756": "caBGA756", + } + + _trellis_required_tools = [ + "yosys", + "nextpnr-ecp5", + "ecppack" + ] + _trellis_file_templates = { + **TemplatedPlatform.build_script_templates, + "{{name}}.il": r""" + # {{autogenerated}} + {{emit_rtlil()}} + """, + "{{name}}.debug.v": r""" + /* {{autogenerated}} */ + {{emit_debug_verilog()}} + """, + "{{name}}.ys": r""" + # {{autogenerated}} + {% for file in platform.iter_files(".v") -%} + read_verilog {{get_override("read_verilog_opts")|options}} {{file}} + {% endfor %} + {% for file in platform.iter_files(".sv") -%} + read_verilog -sv {{get_override("read_verilog_opts")|options}} {{file}} + {% endfor %} + {% for file in platform.iter_files(".il") -%} + read_ilang {{file}} + {% endfor %} + read_ilang {{name}}.il + delete w:$verilog_initial_trigger + {{get_override("script_after_read")|default("# (script_after_read placeholder)")}} + synth_ecp5 {{get_override("synth_opts")|options}} -top {{name}} + {{get_override("script_after_synth")|default("# (script_after_synth placeholder)")}} + write_json {{name}}.json + """, + "{{name}}.lpf": r""" + # {{autogenerated}} + BLOCK ASYNCPATHS; + BLOCK RESETPATHS; + {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} + LOCATE COMP "{{port_name}}" SITE "{{pin_name}}"; + {% if attrs -%} + IOBUF PORT "{{port_name}}" + {%- for 
key, value in attrs.items() %} {{key}}={{value}}{% endfor %}; + {% endif %} + {% endfor %} + {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} + {% if port_signal is not none -%} + FREQUENCY PORT "{{port_signal.name}}" {{frequency}} HZ; + {% else -%} + FREQUENCY NET "{{net_signal|hierarchy(".")}}" {{frequency}} HZ; + {% endif %} + {% endfor %} + {{get_override("add_preferences")|default("# (add_preferences placeholder)")}} + """ + } + _trellis_command_templates = [ + r""" + {{invoke_tool("yosys")}} + {{quiet("-q")}} + {{get_override("yosys_opts")|options}} + -l {{name}}.rpt + {{name}}.ys + """, + r""" + {{invoke_tool("nextpnr-ecp5")}} + {{quiet("--quiet")}} + {{get_override("nextpnr_opts")|options}} + --log {{name}}.tim + {{platform._nextpnr_device_options[platform.device]}} + --package {{platform._nextpnr_package_options[platform.package]|upper}} + --speed {{platform.speed}} + --json {{name}}.json + --lpf {{name}}.lpf + --textcfg {{name}}.config + """, + r""" + {{invoke_tool("ecppack")}} + {{verbose("--verbose")}} + {{get_override("ecppack_opts")|options}} + --input {{name}}.config + --bit {{name}}.bit + --svf {{name}}.svf + """ + ] + + # Diamond templates + + _diamond_required_tools = [ + "pnmainc", + "ddtcmd" + ] + _diamond_file_templates = { + **TemplatedPlatform.build_script_templates, + "build_{{name}}.sh": r""" + # {{autogenerated}} + set -e{{verbose("x")}} + if [ -z "$BASH" ] ; then exec /bin/bash "$0" "$@"; fi + if [ -n "${{platform._toolchain_env_var}}" ]; then + bindir=$(dirname "${{platform._toolchain_env_var}}") + . 
"${{platform._toolchain_env_var}}" + fi + {{emit_commands("sh")}} + """, + "{{name}}.v": r""" + /* {{autogenerated}} */ + {{emit_verilog()}} + """, + "{{name}}.debug.v": r""" + /* {{autogenerated}} */ + {{emit_debug_verilog()}} + """, + "{{name}}.tcl": r""" + prj_project new -name {{name}} -impl impl -impl_dir {{name}}_impl \ + -dev {{platform.device}}-{{platform.speed}}{{platform.package}}{{platform.grade}} \ + -lpf {{name}}.lpf \ + -synthesis synplify + {% for file in platform.iter_files(".v", ".sv", ".vhd", ".vhdl") -%} + prj_src add {{file|tcl_escape}} + {% endfor %} + prj_src add {{name}}.v + prj_impl option top {{name}} + prj_src add {{name}}.sdc + {{get_override("script_project")|default("# (script_project placeholder)")}} + prj_project save + prj_run Synthesis -impl impl + prj_run Translate -impl impl + prj_run Map -impl impl + prj_run PAR -impl impl + prj_run Export -impl impl -task Bitgen + {{get_override("script_after_export")|default("# (script_after_export placeholder)")}} + """, + "{{name}}.lpf": r""" + # {{autogenerated}} + BLOCK ASYNCPATHS; + BLOCK RESETPATHS; + {% for port_name, pin_name, extras in platform.iter_port_constraints_bits() -%} + LOCATE COMP "{{port_name}}" SITE "{{pin_name}}"; + IOBUF PORT "{{port_name}}" + {%- for key, value in extras.items() %} {{key}}={{value}}{% endfor %}; + {% endfor %} + {{get_override("add_preferences")|default("# (add_preferences placeholder)")}} + """, + "{{name}}.sdc": r""" + {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} + {% if port_signal is not none -%} + create_clock -name {{port_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_ports {{port_signal.name|tcl_escape}}] + {% else -%} + create_clock -name {{net_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_nets {{net_signal|hierarchy("/")|tcl_escape}}] + {% endif %} + {% endfor %} + {{get_override("add_constraints")|default("# (add_constraints placeholder)")}} + """, + } + 
_diamond_command_templates = [ + # These don't have any usable command-line option overrides. + r""" + {{invoke_tool("pnmainc")}} + {{name}}.tcl + """, + r""" + {{invoke_tool("ddtcmd")}} + -oft -bit + -if {{name}}_impl/{{name}}_impl.bit -of {{name}}.bit + """, + r""" + {{invoke_tool("ddtcmd")}} + -oft -svfsingle -revd -op "Fast Program" + -if {{name}}_impl/{{name}}_impl.bit -of {{name}}.svf + """, + ] + + # Common logic + + def __init__(self, *, toolchain="Trellis"): + super().__init__() + + assert toolchain in ("Trellis", "Diamond") + self.toolchain = toolchain + + @property + def required_tools(self): + if self.toolchain == "Trellis": + return self._trellis_required_tools + if self.toolchain == "Diamond": + return self._diamond_required_tools + assert False + + @property + def file_templates(self): + if self.toolchain == "Trellis": + return self._trellis_file_templates + if self.toolchain == "Diamond": + return self._diamond_file_templates + assert False + + @property + def command_templates(self): + if self.toolchain == "Trellis": + return self._trellis_command_templates + if self.toolchain == "Diamond": + return self._diamond_command_templates + assert False + + @property + def default_clk_constraint(self): + if self.default_clk == "OSCG": + return Clock(310e6 / self.oscg_div) + return super().default_clk_constraint + + def create_missing_domain(self, name): + # Lattice ECP5 devices have two global set/reset signals: PUR, which is driven at startup + # by the configuration logic and unconditionally resets every storage element, and GSR, + # which is driven by user logic and each storage element may be configured as affected or + # unaffected by GSR. PUR is purely asynchronous, so even though it is a low-skew global + # network, its deassertion may violate a setup/hold constraint with relation to a user + # clock. To avoid this, a GSR/SGSR instance should be driven synchronized to user clock. 
+ if name == "sync" and self.default_clk is not None: + m = Module() + if self.default_clk == "OSCG": + if not hasattr(self, "oscg_div"): + raise ValueError("OSCG divider (oscg_div) must be an integer between 2 " + "and 128") + if not isinstance(self.oscg_div, int) or self.oscg_div < 2 or self.oscg_div > 128: + raise ValueError("OSCG divider (oscg_div) must be an integer between 2 " + "and 128, not {!r}" + .format(self.oscg_div)) + clk_i = Signal() + m.submodules += Instance("OSCG", p_DIV=self.oscg_div, o_OSC=clk_i) + else: + clk_i = self.request(self.default_clk).i + if self.default_rst is not None: + rst_i = self.request(self.default_rst).i + else: + rst_i = Const(0) + + gsr0 = Signal() + gsr1 = Signal() + # There is no end-of-startup signal on ECP5, but PUR is released after IOB enable, so + # a simple reset synchronizer (with PUR as the asynchronous reset) does the job. + m.submodules += [ + Instance("FD1S3AX", p_GSR="DISABLED", i_CK=clk_i, i_D=~rst_i, o_Q=gsr0), + Instance("FD1S3AX", p_GSR="DISABLED", i_CK=clk_i, i_D=gsr0, o_Q=gsr1), + # Although we already synchronize the reset input to user clock, SGSR has dedicated + # clock routing to the center of the FPGA; use that just in case it turns out to be + # more reliable. (None of this is documented.) + Instance("SGSR", i_CLK=clk_i, i_GSR=gsr1), + ] + # GSR implicitly connects to every appropriate storage element. As such, the sync + # domain is reset-less; domains driven by other clocks would need to have dedicated + # reset circuitry or otherwise meet setup/hold constraints on their own. 
+ m.domains += ClockDomain("sync", reset_less=True) + m.d.comb += ClockSignal("sync").eq(clk_i) + return m + + _single_ended_io_types = [ + "HSUL12", "LVCMOS12", "LVCMOS15", "LVCMOS18", "LVCMOS25", "LVCMOS33", "LVTTL33", + "SSTL135_I", "SSTL135_II", "SSTL15_I", "SSTL15_II", "SSTL18_I", "SSTL18_II", + ] + _differential_io_types = [ + "BLVDS25", "BLVDS25E", "HSUL12D", "LVCMOS18D", "LVCMOS25D", "LVCMOS33D", + "LVDS", "LVDS25E", "LVPECL33", "LVPECL33E", "LVTTL33D", "MLVDS", "MLVDS25E", + "SLVS", "SSTL135D_I", "SSTL135D_II", "SSTL15D_I", "SSTL15D_II", "SSTL18D_I", + "SSTL18D_II", "SUBLVDS", + ] + + def should_skip_port_component(self, port, attrs, component): + # On ECP5, a differential IO is placed by only instantiating an IO buffer primitive at + # the PIOA or PIOC location, which is always the non-inverting pin. + if attrs.get("IO_TYPE", "LVCMOS25") in self._differential_io_types and component == "n": + return True + return False + + def _get_xdr_buffer(self, m, pin, *, i_invert=False, o_invert=False): + def get_ireg(clk, d, q): + for bit in range(len(q)): + m.submodules += Instance("IFS1P3DX", + i_SCLK=clk, + i_SP=Const(1), + i_CD=Const(0), + i_D=d[bit], + o_Q=q[bit] + ) + + def get_oreg(clk, d, q): + for bit in range(len(q)): + m.submodules += Instance("OFS1P3DX", + i_SCLK=clk, + i_SP=Const(1), + i_CD=Const(0), + i_D=d[bit], + o_Q=q[bit] + ) + + def get_oereg(clk, oe, q): + for bit in range(len(q)): + m.submodules += Instance("OFS1P3DX", + i_SCLK=clk, + i_SP=Const(1), + i_CD=Const(0), + i_D=oe, + o_Q=q[bit] + ) + + def get_iddr(sclk, d, q0, q1): + for bit in range(len(d)): + m.submodules += Instance("IDDRX1F", + i_SCLK=sclk, + i_RST=Const(0), + i_D=d[bit], + o_Q0=q0[bit], o_Q1=q1[bit] + ) + + def get_iddrx2(sclk, eclk, d, q0, q1, q2, q3): + for bit in range(len(d)): + m.submodules += Instance("IDDRX2F", + i_SCLK=sclk, + i_ECLK=eclk, + i_RST=Const(0), + i_D=d[bit], + o_Q0=q0[bit], o_Q1=q1[bit], o_Q2=q2[bit], o_Q3=q3[bit] + ) + + def get_iddr71b(sclk, eclk, d, q0, 
q1, q2, q3, q4, q5, q6): + for bit in range(len(d)): + m.submodules += Instance("IDDR71B", + i_SCLK=sclk, + i_ECLK=eclk, + i_RST=Const(0), + i_D=d[bit], + o_Q0=q0[bit], o_Q1=q1[bit], o_Q2=q2[bit], o_Q3=q3[bit], + o_Q4=q4[bit], o_Q5=q5[bit], o_Q6=q6[bit], + ) + + def get_oddr(sclk, d0, d1, q): + for bit in range(len(q)): + m.submodules += Instance("ODDRX1F", + i_SCLK=sclk, + i_RST=Const(0), + i_D0=d0[bit], i_D1=d1[bit], + o_Q=q[bit] + ) + + def get_oddrx2(sclk, eclk, d0, d1, d2, d3, q): + for bit in range(len(q)): + m.submodules += Instance("ODDRX2F", + i_SCLK=sclk, + i_ECLK=eclk, + i_RST=Const(0), + i_D0=d0[bit], i_D1=d1[bit], i_D2=d2[bit], i_D3=d3[bit], + o_Q=q[bit] + ) + + def get_oddr71b(sclk, eclk, d0, d1, d2, d3, d4, d5, d6, q): + for bit in range(len(q)): + m.submodules += Instance("ODDR71B", + i_SCLK=sclk, + i_ECLK=eclk, + i_RST=Const(0), + i_D0=d0[bit], i_D1=d1[bit], i_D2=d2[bit], i_D3=d3[bit], + i_D4=d4[bit], i_D5=d5[bit], i_D6=d6[bit], + o_Q=q[bit] + ) + + def get_ineg(z, invert): + if invert: + a = Signal.like(z, name_suffix="_n") + m.d.comb += z.eq(~a) + return a + else: + return z + + def get_oneg(a, invert): + if invert: + z = Signal.like(a, name_suffix="_n") + m.d.comb += z.eq(~a) + return z + else: + return a + + if "i" in pin.dir: + if pin.xdr < 2: + pin_i = get_ineg(pin.i, i_invert) + elif pin.xdr == 2: + pin_i0 = get_ineg(pin.i0, i_invert) + pin_i1 = get_ineg(pin.i1, i_invert) + elif pin.xdr == 4: + pin_i0 = get_ineg(pin.i0, i_invert) + pin_i1 = get_ineg(pin.i1, i_invert) + pin_i2 = get_ineg(pin.i2, i_invert) + pin_i3 = get_ineg(pin.i3, i_invert) + elif pin.xdr == 7: + pin_i0 = get_ineg(pin.i0, i_invert) + pin_i1 = get_ineg(pin.i1, i_invert) + pin_i2 = get_ineg(pin.i2, i_invert) + pin_i3 = get_ineg(pin.i3, i_invert) + pin_i4 = get_ineg(pin.i4, i_invert) + pin_i5 = get_ineg(pin.i5, i_invert) + pin_i6 = get_ineg(pin.i6, i_invert) + if "o" in pin.dir: + if pin.xdr < 2: + pin_o = get_oneg(pin.o, o_invert) + elif pin.xdr == 2: + pin_o0 = 
get_oneg(pin.o0, o_invert) + pin_o1 = get_oneg(pin.o1, o_invert) + elif pin.xdr == 4: + pin_o0 = get_oneg(pin.o0, o_invert) + pin_o1 = get_oneg(pin.o1, o_invert) + pin_o2 = get_oneg(pin.o2, o_invert) + pin_o3 = get_oneg(pin.o3, o_invert) + elif pin.xdr == 7: + pin_o0 = get_oneg(pin.o0, o_invert) + pin_o1 = get_oneg(pin.o1, o_invert) + pin_o2 = get_oneg(pin.o2, o_invert) + pin_o3 = get_oneg(pin.o3, o_invert) + pin_o4 = get_oneg(pin.o4, o_invert) + pin_o5 = get_oneg(pin.o5, o_invert) + pin_o6 = get_oneg(pin.o6, o_invert) + + i = o = t = None + if "i" in pin.dir: + i = Signal(pin.width, name="{}_xdr_i".format(pin.name)) + if "o" in pin.dir: + o = Signal(pin.width, name="{}_xdr_o".format(pin.name)) + if pin.dir in ("oe", "io"): + t = Signal(pin.width, name="{}_xdr_t".format(pin.name)) + + if pin.xdr == 0: + if "i" in pin.dir: + i = pin_i + if "o" in pin.dir: + o = pin_o + if pin.dir in ("oe", "io"): + t = Repl(~pin.oe, pin.width) + elif pin.xdr == 1: + if "i" in pin.dir: + get_ireg(pin.i_clk, i, pin_i) + if "o" in pin.dir: + get_oreg(pin.o_clk, pin_o, o) + if pin.dir in ("oe", "io"): + get_oereg(pin.o_clk, ~pin.oe, t) + elif pin.xdr == 2: + if "i" in pin.dir: + get_iddr(pin.i_clk, i, pin_i0, pin_i1) + if "o" in pin.dir: + get_oddr(pin.o_clk, pin_o0, pin_o1, o) + if pin.dir in ("oe", "io"): + get_oereg(pin.o_clk, ~pin.oe, t) + elif pin.xdr == 4: + if "i" in pin.dir: + get_iddrx2(pin.i_clk, pin.i_fclk, i, pin_i0, pin_i1, pin_i2, pin_i3) + if "o" in pin.dir: + get_oddrx2(pin.o_clk, pin.o_fclk, pin_o0, pin_o1, pin_o2, pin_o3, o) + if pin.dir in ("oe", "io"): + get_oereg(pin.o_clk, ~pin.oe, t) + elif pin.xdr == 7: + if "i" in pin.dir: + get_iddr71b(pin.i_clk, pin.i_fclk, i, pin_i0, pin_i1, pin_i2, pin_i3, pin_i4, pin_i5, pin_i6) + if "o" in pin.dir: + get_oddr71b(pin.o_clk, pin.o_fclk, pin_o0, pin_o1, pin_o2, pin_o3, pin_o4, pin_o5, pin_o6, o) + if pin.dir in ("oe", "io"): + get_oereg(pin.o_clk, ~pin.oe, t) + else: + assert False + + return (i, o, t) + + def get_input(self, 
pin, port, attrs, invert): + self._check_feature("single-ended input", pin, attrs, + valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("IB", + i_I=port.io[bit], + o_O=i[bit] + ) + return m + + def get_output(self, pin, port, attrs, invert): + self._check_feature("single-ended output", pin, attrs, + valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("OB", + i_I=o[bit], + o_O=port.io[bit] + ) + return m + + def get_tristate(self, pin, port, attrs, invert): + self._check_feature("single-ended tristate", pin, attrs, + valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBZ", + i_T=t[bit], + i_I=o[bit], + o_O=port.io[bit] + ) + return m + + def get_input_output(self, pin, port, attrs, invert): + self._check_feature("single-ended input/output", pin, attrs, + valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert, o_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("BB", + i_T=t[bit], + i_I=o[bit], + o_O=i[bit], + io_B=port.io[bit] + ) + return m + + def get_diff_input(self, pin, port, attrs, invert): + self._check_feature("differential input", pin, attrs, + valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("IB", + i_I=port.p[bit], + o_O=i[bit] + ) + return m + + def get_diff_output(self, pin, port, attrs, invert): + 
self._check_feature("differential output", pin, attrs, + valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("OB", + i_I=o[bit], + o_O=port.p[bit], + ) + return m + + def get_diff_tristate(self, pin, port, attrs, invert): + self._check_feature("differential tristate", pin, attrs, + valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBZ", + i_T=t[bit], + i_I=o[bit], + o_O=port.p[bit], + ) + return m + + def get_diff_input_output(self, pin, port, attrs, invert): + self._check_feature("differential input/output", pin, attrs, + valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert, o_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("BB", + i_T=t[bit], + i_I=o[bit], + o_O=i[bit], + io_B=port.p[bit], + ) + return m + + # CDC primitives are not currently specialized for ECP5. + # While Diamond supports false path constraints, nextpnr-ecp5 does not. diff --git a/amaranth/vendor/lattice_ice40.py b/amaranth/vendor/lattice_ice40.py new file mode 100644 index 0000000..59d97d5 --- /dev/null +++ b/amaranth/vendor/lattice_ice40.py @@ -0,0 +1,627 @@ +from abc import abstractproperty + +from ..hdl import * +from ..lib.cdc import ResetSynchronizer +from ..build import * + + +__all__ = ["LatticeICE40Platform"] + + +class LatticeICE40Platform(TemplatedPlatform): + """ + IceStorm toolchain + ------------------ + + Required tools: + * ``yosys`` + * ``nextpnr-ice40`` + * ``icepack`` + + The environment is populated by running the script specified in the environment variable + ``AMARANTH_ENV_IceStorm``, if present. 
+ + Available overrides: + * ``verbose``: enables logging of informational messages to standard error. + * ``read_verilog_opts``: adds options for ``read_verilog`` Yosys command. + * ``synth_opts``: adds options for ``synth_ice40`` Yosys command. + * ``script_after_read``: inserts commands after ``read_ilang`` in Yosys script. + * ``script_after_synth``: inserts commands after ``synth_ice40`` in Yosys script. + * ``yosys_opts``: adds extra options for ``yosys``. + * ``nextpnr_opts``: adds extra options for ``nextpnr-ice40``. + * ``add_pre_pack``: inserts commands at the end in pre-pack Python script. + * ``add_constraints``: inserts commands at the end in the PCF file. + + Build products: + * ``{{name}}.rpt``: Yosys log. + * ``{{name}}.json``: synthesized RTL. + * ``{{name}}.tim``: nextpnr log. + * ``{{name}}.asc``: ASCII bitstream. + * ``{{name}}.bin``: binary bitstream. + + iCECube2 toolchain + ------------------ + + This toolchain comes in two variants: ``LSE-iCECube2`` and ``Synplify-iCECube2``. + + Required tools: + * iCECube2 toolchain + * ``tclsh`` + + The environment is populated by setting the necessary environment variables based on + ``AMARANTH_ENV_iCECube2``, which must point to the root of the iCECube2 installation, and + is required. + + Available overrides: + * ``verbose``: enables logging of informational messages to standard error. + * ``lse_opts``: adds options for LSE. + * ``script_after_add``: inserts commands after ``add_file`` in Synplify Tcl script. + * ``script_after_options``: inserts commands after ``set_option`` in Synplify Tcl script. + * ``add_constraints``: inserts commands in SDC file. + * ``script_after_flow``: inserts commands after ``run_sbt_backend_auto`` in SBT + Tcl script. + + Build products: + * ``{{name}}_lse.log`` (LSE) or ``{{name}}_design/{{name}}.htm`` (Synplify): synthesis log. + * ``sbt/outputs/router/{{name}}_timing.rpt``: timing report. + * ``{{name}}.edf``: EDIF netlist. + * ``{{name}}.bin``: binary bitstream. 
+ """ + + toolchain = None # selected when creating platform + + device = abstractproperty() + package = abstractproperty() + + # IceStorm templates + + _nextpnr_device_options = { + "iCE40LP384": "--lp384", + "iCE40LP1K": "--lp1k", + "iCE40LP4K": "--lp8k", + "iCE40LP8K": "--lp8k", + "iCE40HX1K": "--hx1k", + "iCE40HX4K": "--hx8k", + "iCE40HX8K": "--hx8k", + "iCE40UP5K": "--up5k", + "iCE40UP3K": "--up5k", + "iCE5LP4K": "--u4k", + "iCE5LP2K": "--u4k", + "iCE5LP1K": "--u4k", + } + _nextpnr_package_options = { + "iCE40LP4K": ":4k", + "iCE40HX4K": ":4k", + "iCE40UP3K": "", + "iCE5LP2K": "", + "iCE5LP1K": "", + } + + _icestorm_required_tools = [ + "yosys", + "nextpnr-ice40", + "icepack", + ] + _icestorm_file_templates = { + **TemplatedPlatform.build_script_templates, + "{{name}}.il": r""" + # {{autogenerated}} + {{emit_rtlil()}} + """, + "{{name}}.debug.v": r""" + /* {{autogenerated}} */ + {{emit_debug_verilog()}} + """, + "{{name}}.ys": r""" + # {{autogenerated}} + {% for file in platform.iter_files(".v") -%} + read_verilog {{get_override("read_verilog_opts")|options}} {{file}} + {% endfor %} + {% for file in platform.iter_files(".sv") -%} + read_verilog -sv {{get_override("read_verilog_opts")|options}} {{file}} + {% endfor %} + {% for file in platform.iter_files(".il") -%} + read_ilang {{file}} + {% endfor %} + read_ilang {{name}}.il + delete w:$verilog_initial_trigger + {{get_override("script_after_read")|default("# (script_after_read placeholder)")}} + synth_ice40 {{get_override("synth_opts")|options}} -top {{name}} + {{get_override("script_after_synth")|default("# (script_after_synth placeholder)")}} + write_json {{name}}.json + """, + "{{name}}.pcf": r""" + # {{autogenerated}} + {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} + set_io {{port_name}} {{pin_name}} + {% endfor %} + {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} + set_frequency {{net_signal|hierarchy(".")}} {{frequency/1000000}} + {% 
endfor%} + {{get_override("add_constraints")|default("# (add_constraints placeholder)")}} + """, + } + _icestorm_command_templates = [ + r""" + {{invoke_tool("yosys")}} + {{quiet("-q")}} + {{get_override("yosys_opts")|options}} + -l {{name}}.rpt + {{name}}.ys + """, + r""" + {{invoke_tool("nextpnr-ice40")}} + {{quiet("--quiet")}} + {{get_override("nextpnr_opts")|options}} + --log {{name}}.tim + {{platform._nextpnr_device_options[platform.device]}} + --package + {{platform.package|lower}}{{platform._nextpnr_package_options[platform.device]| + default("")}} + --json {{name}}.json + --pcf {{name}}.pcf + --asc {{name}}.asc + """, + r""" + {{invoke_tool("icepack")}} + {{verbose("-v")}} + {{name}}.asc + {{name}}.bin + """ + ] + + # iCECube2 templates + + _icecube2_required_tools = [ + "synthesis", + "synpwrap", + "tclsh", + ] + _icecube2_file_templates = { + **TemplatedPlatform.build_script_templates, + "build_{{name}}.sh": r""" + # {{autogenerated}} + set -e{{verbose("x")}} + if [ -n "${{platform._toolchain_env_var}}" ]; then + # LSE environment + export LD_LIBRARY_PATH=${{platform._toolchain_env_var}}/LSE/bin/lin64:$LD_LIBRARY_PATH + export PATH=${{platform._toolchain_env_var}}/LSE/bin/lin64:$PATH + export FOUNDRY=${{platform._toolchain_env_var}}/LSE + # Synplify environment + export LD_LIBRARY_PATH=${{platform._toolchain_env_var}}/sbt_backend/bin/linux/opt/synpwrap:$LD_LIBRARY_PATH + export PATH=${{platform._toolchain_env_var}}/sbt_backend/bin/linux/opt/synpwrap:$PATH + export SYNPLIFY_PATH=${{platform._toolchain_env_var}}/synpbase + # Common environment + export SBT_DIR=${{platform._toolchain_env_var}}/sbt_backend + else + echo "Variable ${{platform._toolchain_env_var}} must be set" >&2; exit 1 + fi + {{emit_commands("sh")}} + """, + "{{name}}.v": r""" + /* {{autogenerated}} */ + {{emit_verilog()}} + """, + "{{name}}.debug.v": r""" + /* {{autogenerated}} */ + {{emit_debug_verilog()}} + """, + "{{name}}_lse.prj": r""" + # {{autogenerated}} + -a SBT{{platform.family}} 
+ -d {{platform.device}} + -t {{platform.package}} + {{get_override("lse_opts")|options|default("# (lse_opts placeholder)")}} + {% for file in platform.iter_files(".v") -%} + -ver {{file}} + {% endfor %} + -ver {{name}}.v + -sdc {{name}}.sdc + -top {{name}} + -output_edif {{name}}.edf + -logfile {{name}}_lse.log + """, + "{{name}}_syn.prj": r""" + # {{autogenerated}} + {% for file in platform.iter_files(".v", ".sv", ".vhd", ".vhdl") -%} + add_file -verilog {{file|tcl_escape}} + {% endfor %} + add_file -verilog {{name}}.v + add_file -constraint {{name}}.sdc + {{get_override("script_after_add")|default("# (script_after_add placeholder)")}} + impl -add {{name}}_design -type fpga + set_option -technology SBT{{platform.family}} + set_option -part {{platform.device}} + set_option -package {{platform.package}} + {{get_override("script_after_options")|default("# (script_after_options placeholder)")}} + project -result_format edif + project -result_file {{name}}.edf + impl -active {{name}}_design + project -run compile + project -run map + project -run fpga_mapper + file copy -force -- {{name}}_design/{{name}}.edf {{name}}.edf + """, + "{{name}}.sdc": r""" + # {{autogenerated}} + {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} + {% if port_signal is not none -%} + create_clock -name {{port_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_ports {{port_signal.name|tcl_escape}}] + {% else -%} + create_clock -name {{net_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_nets {{net_signal|hierarchy("/")|tcl_escape}}] + {% endif %} + {% endfor %} + {{get_override("add_constraints")|default("# (add_constraints placeholder)")}} + """, + "{{name}}.tcl": r""" + # {{autogenerated}} + set device {{platform.device}}-{{platform.package}} + set top_module {{name}} + set proj_dir . + set output_dir . 
+ set edif_file {{name}} + set tool_options ":edifparser -y {{name}}.pcf" + set sbt_root $::env(SBT_DIR) + append sbt_tcl $sbt_root "/tcl/sbt_backend_synpl.tcl" + source $sbt_tcl + run_sbt_backend_auto $device $top_module $proj_dir $output_dir $tool_options $edif_file + {{get_override("script_after_file")|default("# (script_after_file placeholder)")}} + file copy -force -- sbt/outputs/bitmap/{{name}}_bitmap.bin {{name}}.bin + exit + """, + "{{name}}.pcf": r""" + # {{autogenerated}} + {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} + set_io {{port_name}} {{pin_name}} + {% endfor %} + """, + } + _lse_icecube2_command_templates = [ + r"""synthesis -f {{name}}_lse.prj""", + r"""tclsh {{name}}.tcl""", + ] + _synplify_icecube2_command_templates = [ + r"""synpwrap -prj {{name}}_syn.prj -log {{name}}_syn.log""", + r"""tclsh {{name}}.tcl""", + ] + + # Common logic + + def __init__(self, *, toolchain="IceStorm"): + super().__init__() + + assert toolchain in ("IceStorm", "LSE-iCECube2", "Synplify-iCECube2") + self.toolchain = toolchain + + @property + def family(self): + if self.device.startswith("iCE40"): + return "iCE40" + if self.device.startswith("iCE5"): + return "iCE5" + assert False + + @property + def _toolchain_env_var(self): + if self.toolchain == "IceStorm": + return f"AMARANTH_ENV_{self.toolchain}" + if self.toolchain in ("LSE-iCECube2", "Synplify-iCECube2"): + return f"AMARANTH_ENV_iCECube2" + assert False + + @property + def required_tools(self): + if self.toolchain == "IceStorm": + return self._icestorm_required_tools + if self.toolchain in ("LSE-iCECube2", "Synplify-iCECube2"): + return self._icecube2_required_tools + assert False + + @property + def file_templates(self): + if self.toolchain == "IceStorm": + return self._icestorm_file_templates + if self.toolchain in ("LSE-iCECube2", "Synplify-iCECube2"): + return self._icecube2_file_templates + assert False + + @property + def command_templates(self): + if self.toolchain == 
"IceStorm": + return self._icestorm_command_templates + if self.toolchain == "LSE-iCECube2": + return self._lse_icecube2_command_templates + if self.toolchain == "Synplify-iCECube2": + return self._synplify_icecube2_command_templates + assert False + + @property + def default_clk_constraint(self): + # Internal high-speed oscillator: 48 MHz / (2 ^ div) + if self.default_clk == "SB_HFOSC": + return Clock(48e6 / 2 ** self.hfosc_div) + # Internal low-speed oscillator: 10 KHz + elif self.default_clk == "SB_LFOSC": + return Clock(10e3) + # Otherwise, use the defined Clock resource. + return super().default_clk_constraint + + def create_missing_domain(self, name): + # For unknown reasons (no errata was ever published, and no documentation mentions this + # issue), iCE40 BRAMs read as zeroes for ~3 us after configuration and release of internal + # global reset. Note that this is a *time-based* delay, generated purely by the internal + # oscillator, which may not be observed nor influenced directly. For details, see links: + # * https://github.com/cliffordwolf/icestorm/issues/76#issuecomment-289270411 + # * https://github.com/cliffordwolf/icotools/issues/2#issuecomment-299734673 + # + # To handle this, it is necessary to have a global reset in any iCE40 design that may + # potentially instantiate BRAMs, and assert this reset for >3 us after configuration. + # (We add a margin of 5x to allow for PVT variation.) If the board includes a dedicated + # reset line, this line is ORed with the power on reset. + # + # If an internal oscillator is selected as the default clock source, the power-on-reset + # delay is increased to 100 us, since the oscillators are only stable after that long. + # + # The power-on reset timer counts up because the vendor tools do not support initialization + # of flip-flops. + if name == "sync" and self.default_clk is not None: + m = Module() + + # Internal high-speed clock: 6 MHz, 12 MHz, 24 MHz, or 48 MHz depending on the divider. 
+ if self.default_clk == "SB_HFOSC": + if not hasattr(self, "hfosc_div"): + raise ValueError("SB_HFOSC divider exponent (hfosc_div) must be an integer " + "between 0 and 3") + if not isinstance(self.hfosc_div, int) or self.hfosc_div < 0 or self.hfosc_div > 3: + raise ValueError("SB_HFOSC divider exponent (hfosc_div) must be an integer " + "between 0 and 3, not {!r}" + .format(self.hfosc_div)) + clk_i = Signal() + m.submodules += Instance("SB_HFOSC", + i_CLKHFEN=1, + i_CLKHFPU=1, + p_CLKHF_DIV="0b{0:02b}".format(self.hfosc_div), + o_CLKHF=clk_i) + delay = int(100e-6 * self.default_clk_frequency) + # Internal low-speed clock: 10 KHz. + elif self.default_clk == "SB_LFOSC": + clk_i = Signal() + m.submodules += Instance("SB_LFOSC", + i_CLKLFEN=1, + i_CLKLFPU=1, + o_CLKLF=clk_i) + delay = int(100e-6 * self.default_clk_frequency) + # User-defined clock signal. + else: + clk_i = self.request(self.default_clk).i + delay = int(15e-6 * self.default_clk_frequency) + + if self.default_rst is not None: + rst_i = self.request(self.default_rst).i + else: + rst_i = Const(0) + + # Power-on-reset domain + m.domains += ClockDomain("por", reset_less=True, local=True) + timer = Signal(range(delay)) + ready = Signal() + m.d.comb += ClockSignal("por").eq(clk_i) + with m.If(timer == delay): + m.d.por += ready.eq(1) + with m.Else(): + m.d.por += timer.eq(timer + 1) + + # Primary domain + m.domains += ClockDomain("sync") + m.d.comb += ClockSignal("sync").eq(clk_i) + if self.default_rst is not None: + m.submodules.reset_sync = ResetSynchronizer(~ready | rst_i, domain="sync") + else: + m.d.comb += ResetSignal("sync").eq(~ready) + + return m + + def should_skip_port_component(self, port, attrs, component): + # On iCE40, a differential input is placed by only instantiating an SB_IO primitive for + # the pin with z=0, which is the non-inverting pin. 
The pinout unfortunately differs + # between LP/HX and UP series: + # * for LP/HX, z=0 is DPxxB (B is non-inverting, A is inverting) + # * for UP, z=0 is IOB_xxA (A is non-inverting, B is inverting) + if attrs.get("IO_STANDARD", "SB_LVCMOS") == "SB_LVDS_INPUT" and component == "n": + return True + return False + + def _get_io_buffer(self, m, pin, port, attrs, *, i_invert=False, o_invert=False, + invert_lut=False): + def get_dff(clk, d, q): + m.submodules += Instance("$dff", + p_CLK_POLARITY=1, + p_WIDTH=len(d), + i_CLK=clk, + i_D=d, + o_Q=q) + + def get_ineg(y, invert): + if invert_lut: + a = Signal.like(y, name_suffix="_x{}".format(1 if invert else 0)) + for bit in range(len(y)): + m.submodules += Instance("SB_LUT4", + p_LUT_INIT=Const(0b01 if invert else 0b10, 16), + i_I0=a[bit], + i_I1=Const(0), + i_I2=Const(0), + i_I3=Const(0), + o_O=y[bit]) + return a + elif invert: + a = Signal.like(y, name_suffix="_n") + m.d.comb += y.eq(~a) + return a + else: + return y + + def get_oneg(a, invert): + if invert_lut: + y = Signal.like(a, name_suffix="_x{}".format(1 if invert else 0)) + for bit in range(len(a)): + m.submodules += Instance("SB_LUT4", + p_LUT_INIT=Const(0b01 if invert else 0b10, 16), + i_I0=a[bit], + i_I1=Const(0), + i_I2=Const(0), + i_I3=Const(0), + o_O=y[bit]) + return y + elif invert: + y = Signal.like(a, name_suffix="_n") + m.d.comb += y.eq(~a) + return y + else: + return a + + if "GLOBAL" in attrs: + is_global_input = bool(attrs["GLOBAL"]) + del attrs["GLOBAL"] + else: + is_global_input = False + assert not (is_global_input and i_invert) + + if "i" in pin.dir: + if pin.xdr < 2: + pin_i = get_ineg(pin.i, i_invert) + elif pin.xdr == 2: + pin_i0 = get_ineg(pin.i0, i_invert) + pin_i1 = get_ineg(pin.i1, i_invert) + if "o" in pin.dir: + if pin.xdr < 2: + pin_o = get_oneg(pin.o, o_invert) + elif pin.xdr == 2: + pin_o0 = get_oneg(pin.o0, o_invert) + pin_o1 = get_oneg(pin.o1, o_invert) + + if "i" in pin.dir and pin.xdr == 2: + i0_ff = Signal.like(pin_i0, 
name_suffix="_ff") + i1_ff = Signal.like(pin_i1, name_suffix="_ff") + get_dff(pin.i_clk, i0_ff, pin_i0) + get_dff(pin.i_clk, i1_ff, pin_i1) + if "o" in pin.dir and pin.xdr == 2: + o1_ff = Signal.like(pin_o1, name_suffix="_ff") + get_dff(pin.o_clk, pin_o1, o1_ff) + + for bit in range(len(port)): + io_args = [ + ("io", "PACKAGE_PIN", port[bit]), + *(("p", key, value) for key, value in attrs.items()), + ] + + if "i" not in pin.dir: + # If no input pin is requested, it is important to use a non-registered input pin + # type, because an output-only pin would not have an input clock, and if its input + # is configured as registered, this would prevent a co-located input-capable pin + # from using an input clock. + i_type = 0b01 # PIN_INPUT + elif pin.xdr == 0: + i_type = 0b01 # PIN_INPUT + elif pin.xdr > 0: + i_type = 0b00 # PIN_INPUT_REGISTERED aka PIN_INPUT_DDR + if "o" not in pin.dir: + o_type = 0b0000 # PIN_NO_OUTPUT + elif pin.xdr == 0 and pin.dir == "o": + o_type = 0b0110 # PIN_OUTPUT + elif pin.xdr == 0: + o_type = 0b1010 # PIN_OUTPUT_TRISTATE + elif pin.xdr == 1 and pin.dir == "o": + o_type = 0b0101 # PIN_OUTPUT_REGISTERED + elif pin.xdr == 1: + o_type = 0b1101 # PIN_OUTPUT_REGISTERED_ENABLE_REGISTERED + elif pin.xdr == 2 and pin.dir == "o": + o_type = 0b0100 # PIN_OUTPUT_DDR + elif pin.xdr == 2: + o_type = 0b1100 # PIN_OUTPUT_DDR_ENABLE_REGISTERED + io_args.append(("p", "PIN_TYPE", C((o_type << 2) | i_type, 6))) + + if hasattr(pin, "i_clk"): + io_args.append(("i", "INPUT_CLK", pin.i_clk)) + if hasattr(pin, "o_clk"): + io_args.append(("i", "OUTPUT_CLK", pin.o_clk)) + + if "i" in pin.dir: + if pin.xdr == 0 and is_global_input: + io_args.append(("o", "GLOBAL_BUFFER_OUTPUT", pin.i[bit])) + elif pin.xdr < 2: + io_args.append(("o", "D_IN_0", pin_i[bit])) + elif pin.xdr == 2: + # Re-register both inputs before they enter fabric. This increases hold time + # to an entire cycle, and adds one cycle of latency. 
+ io_args.append(("o", "D_IN_0", i0_ff[bit])) + io_args.append(("o", "D_IN_1", i1_ff[bit])) + if "o" in pin.dir: + if pin.xdr < 2: + io_args.append(("i", "D_OUT_0", pin_o[bit])) + elif pin.xdr == 2: + # Re-register negedge output after it leaves fabric. This increases setup time + # to an entire cycle, and doesn't add latency. + io_args.append(("i", "D_OUT_0", pin_o0[bit])) + io_args.append(("i", "D_OUT_1", o1_ff[bit])) + + if pin.dir in ("oe", "io"): + io_args.append(("i", "OUTPUT_ENABLE", pin.oe)) + + if is_global_input: + m.submodules["{}_{}".format(pin.name, bit)] = Instance("SB_GB_IO", *io_args) + else: + m.submodules["{}_{}".format(pin.name, bit)] = Instance("SB_IO", *io_args) + + def get_input(self, pin, port, attrs, invert): + self._check_feature("single-ended input", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + m = Module() + self._get_io_buffer(m, pin, port.io, attrs, i_invert=invert) + return m + + def get_output(self, pin, port, attrs, invert): + self._check_feature("single-ended output", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + m = Module() + self._get_io_buffer(m, pin, port.io, attrs, o_invert=invert) + return m + + def get_tristate(self, pin, port, attrs, invert): + self._check_feature("single-ended tristate", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + m = Module() + self._get_io_buffer(m, pin, port.io, attrs, o_invert=invert) + return m + + def get_input_output(self, pin, port, attrs, invert): + self._check_feature("single-ended input/output", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + m = Module() + self._get_io_buffer(m, pin, port.io, attrs, i_invert=invert, o_invert=invert) + return m + + def get_diff_input(self, pin, port, attrs, invert): + self._check_feature("differential input", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + m = Module() + # See comment in should_skip_port_component above. 
+ self._get_io_buffer(m, pin, port.p, attrs, i_invert=invert) + return m + + def get_diff_output(self, pin, port, attrs, invert): + self._check_feature("differential output", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + m = Module() + # Note that the non-inverting output pin is not driven the same way as a regular + # output pin. The inverter introduces a delay, so for a non-inverting output pin, + # an identical delay is introduced by instantiating a LUT. This makes the waveform + # perfectly symmetric in the xdr=0 case. + self._get_io_buffer(m, pin, port.p, attrs, o_invert= invert, invert_lut=True) + self._get_io_buffer(m, pin, port.n, attrs, o_invert=not invert, invert_lut=True) + return m + + # Tristate bidirectional buffers are not supported on iCE40 because it requires external + # termination, which is different for differential pins configured as inputs and outputs. + + # CDC primitives are not currently specialized for iCE40. It is not known if iCECube2 supports + # the necessary attributes; nextpnr-ice40 does not. diff --git a/amaranth/vendor/lattice_machxo2.py b/amaranth/vendor/lattice_machxo2.py new file mode 100644 index 0000000..ff593ba --- /dev/null +++ b/amaranth/vendor/lattice_machxo2.py @@ -0,0 +1,11 @@ +import warnings + +from .lattice_machxo_2_3l import LatticeMachXO2Platform + + +__all__ = ["LatticeMachXO2Platform"] + + +# TODO(amaranth-0.4): remove +warnings.warn("instead of amaranth.vendor.lattice_machxo2, use amaranth.vendor.lattice_machxo_2_3l", + DeprecationWarning, stacklevel=2) diff --git a/amaranth/vendor/lattice_machxo_2_3l.py b/amaranth/vendor/lattice_machxo_2_3l.py new file mode 100644 index 0000000..6262c3e --- /dev/null +++ b/amaranth/vendor/lattice_machxo_2_3l.py @@ -0,0 +1,421 @@ +from abc import abstractproperty + +from ..hdl import * +from ..build import * + + +__all__ = ["LatticeMachXO2Platform", "LatticeMachXO3LPlatform"] + + +# MachXO2 and MachXO3L primitives are the same. 
Handle both using +# one class and expose user-aliases for convenience. +class LatticeMachXO2Or3LPlatform(TemplatedPlatform): + """ + Required tools: + * ``pnmainc`` + * ``ddtcmd`` + + The environment is populated by running the script specified in the environment variable + ``AMARANTH_ENV_Diamond``, if present. On Linux, diamond_env as provided by Diamond + itself is a good candidate. On Windows, the following script (named ``diamond_env.bat``, + for instance) is known to work:: + + @echo off + set PATH=C:\\lscc\\diamond\\%DIAMOND_VERSION%\\bin\\nt64;%PATH% + + Available overrides: + * ``script_project``: inserts commands before ``prj_project save`` in Tcl script. + * ``script_after_export``: inserts commands after ``prj_run Export`` in Tcl script. + * ``add_preferences``: inserts commands at the end of the LPF file. + * ``add_constraints``: inserts commands at the end of the XDC file. + + Build products: + * ``{{name}}_impl/{{name}}_impl.htm``: consolidated log. + * ``{{name}}.jed``: JEDEC fuse file. + * ``{{name}}.bit``: binary bitstream. + * ``{{name}}.svf``: JTAG programming vector for FLASH programming. + * ``{{name}}_flash.svf``: JTAG programming vector for FLASH programming. + * ``{{name}}_sram.svf``: JTAG programming vector for SRAM programming. + """ + + toolchain = "Diamond" + + device = abstractproperty() + package = abstractproperty() + speed = abstractproperty() + grade = "C" # [C]ommercial, [I]ndustrial + + required_tools = [ + "pnmainc", + "ddtcmd" + ] + file_templates = { + **TemplatedPlatform.build_script_templates, + "build_{{name}}.sh": r""" + # {{autogenerated}} + set -e{{verbose("x")}} + if [ -z "$BASH" ] ; then exec /bin/bash "$0" "$@"; fi + if [ -n "${{platform._toolchain_env_var}}" ]; then + bindir=$(dirname "${{platform._toolchain_env_var}}") + . 
"${{platform._toolchain_env_var}}" + fi + {{emit_commands("sh")}} + """, + "{{name}}.v": r""" + /* {{autogenerated}} */ + {{emit_verilog()}} + """, + "{{name}}.debug.v": r""" + /* {{autogenerated}} */ + {{emit_debug_verilog()}} + """, + "{{name}}.tcl": r""" + prj_project new -name {{name}} -impl impl -impl_dir {{name}}_impl \ + -dev {{platform.device}}-{{platform.speed}}{{platform.package}}{{platform.grade}} \ + -lpf {{name}}.lpf \ + -synthesis synplify + {% for file in platform.iter_files(".v", ".sv", ".vhd", ".vhdl") -%} + prj_src add {{file|tcl_escape}} + {% endfor %} + prj_src add {{name}}.v + prj_impl option top {{name}} + prj_src add {{name}}.sdc + {{get_override("script_project")|default("# (script_project placeholder)")}} + prj_project save + prj_run Synthesis -impl impl + prj_run Translate -impl impl + prj_run Map -impl impl + prj_run PAR -impl impl + prj_run Export -impl impl -task Bitgen + prj_run Export -impl impl -task Jedecgen + {{get_override("script_after_export")|default("# (script_after_export placeholder)")}} + """, + "{{name}}.lpf": r""" + # {{autogenerated}} + BLOCK ASYNCPATHS; + BLOCK RESETPATHS; + {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} + LOCATE COMP "{{port_name}}" SITE "{{pin_name}}"; + {% if attrs -%} + IOBUF PORT "{{port_name}}" + {%- for key, value in attrs.items() %} {{key}}={{value}}{% endfor %}; + {% endif %} + {% endfor %} + {{get_override("add_preferences")|default("# (add_preferences placeholder)")}} + """, + "{{name}}.sdc": r""" + {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} + {% if port_signal is not none -%} + create_clock -name {{port_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_ports {{port_signal.name|tcl_escape}}] + {% else -%} + create_clock -name {{net_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_nets {{net_signal|hierarchy("/")|tcl_escape}}] + {% endif %} + {% endfor %} + 
{{get_override("add_constraints")|default("# (add_constraints placeholder)")}} + """, + } + command_templates = [ + # These don't have any usable command-line option overrides. + r""" + {{invoke_tool("pnmainc")}} + {{name}}.tcl + """, + r""" + {{invoke_tool("ddtcmd")}} + -oft -bit + -if {{name}}_impl/{{name}}_impl.bit -of {{name}}.bit + """, + r""" + {{invoke_tool("ddtcmd")}} + -oft -jed + -dev {{platform.device}}-{{platform.speed}}{{platform.package}}{{platform.grade}} + -if {{name}}_impl/{{name}}_impl.jed -of {{name}}.jed + """, + r""" + {{invoke_tool("ddtcmd")}} + -oft -svfsingle -revd -op "FLASH Erase,Program,Verify" + -if {{name}}_impl/{{name}}_impl.jed -of {{name}}_flash.svf + """, + # TODO(amaranth-0.4): remove + r""" + {% if syntax == "bat" -%} + copy {{name}}_flash.svf {{name}}.svf + {% else -%} + cp {{name}}_flash.svf {{name}}.svf + {% endif %} + """, + r""" + {{invoke_tool("ddtcmd")}} + -oft -svfsingle -revd -op "SRAM Fast Program" + -if {{name}}_impl/{{name}}_impl.bit -of {{name}}_sram.svf + """, + ] + + def create_missing_domain(self, name): + # Lattice MachXO2/MachXO3L devices have two global set/reset signals: PUR, which is driven at + # startup by the configuration logic and unconditionally resets every storage element, + # and GSR, which is driven by user logic and each storage element may be configured as + # affected or unaffected by GSR. PUR is purely asynchronous, so even though it is + # a low-skew global network, its deassertion may violate a setup/hold constraint with + # relation to a user clock. To avoid this, a GSR/SGSR instance should be driven + # synchronized to user clock. 
+ if name == "sync" and self.default_clk is not None: + clk_i = self.request(self.default_clk).i + if self.default_rst is not None: + rst_i = self.request(self.default_rst).i + else: + rst_i = Const(0) + + gsr0 = Signal() + gsr1 = Signal() + m = Module() + # There is no end-of-startup signal on MachXO2/MachXO3L, but PUR is released after IOB + # enable, so a simple reset synchronizer (with PUR as the asynchronous reset) does the job. + m.submodules += [ + Instance("FD1S3AX", p_GSR="DISABLED", i_CK=clk_i, i_D=~rst_i, o_Q=gsr0), + Instance("FD1S3AX", p_GSR="DISABLED", i_CK=clk_i, i_D=gsr0, o_Q=gsr1), + # Although we already synchronize the reset input to user clock, SGSR has dedicated + # clock routing to the center of the FPGA; use that just in case it turns out to be + # more reliable. (None of this is documented.) + Instance("SGSR", i_CLK=clk_i, i_GSR=gsr1), + ] + # GSR implicitly connects to every appropriate storage element. As such, the sync + # domain is reset-less; domains driven by other clocks would need to have dedicated + # reset circuitry or otherwise meet setup/hold constraints on their own. 
+ m.domains += ClockDomain("sync", reset_less=True) + m.d.comb += ClockSignal("sync").eq(clk_i) + return m + + _single_ended_io_types = [ + "PCI33", "LVTTL33", "LVCMOS33", "LVCMOS25", "LVCMOS18", "LVCMOS15", "LVCMOS12", + "LVCMOS25R33", "LVCMOS18R33", "LVCMOS18R25", "LVCMOS15R33", "LVCMOS15R25", "LVCMOS12R33", + "LVCMOS12R25", "LVCMOS10R33", "LVCMOS10R25", "SSTL25_I", "SSTL25_II", "SSTL18_I", + "SSTL18_II", "HSTL18_I", "HSTL18_II", + ] + _differential_io_types = [ + "LVDS25", "LVDS25E", "RSDS25", "RSDS25E", "BLVDS25", "BLVDS25E", "MLVDS25", "MLVDS25E", + "LVPECL33", "LVPECL33E", "SSTL25D_I", "SSTL25D_II", "SSTL18D_I", "SSTL18D_II", + "HSTL18D_I", "HSTL18D_II", "LVTTL33D", "LVCMOS33D", "LVCMOS25D", "LVCMOS18D", "LVCMOS15D", + "LVCMOS12D", "MIPI", + ] + + def should_skip_port_component(self, port, attrs, component): + # On ECP5, a differential IO is placed by only instantiating an IO buffer primitive at + # the PIOA or PIOC location, which is always the non-inverting pin. + if attrs.get("IO_TYPE", "LVCMOS25") in self._differential_io_types and component == "n": + return True + return False + + def _get_xdr_buffer(self, m, pin, *, i_invert=False, o_invert=False): + def get_ireg(clk, d, q): + for bit in range(len(q)): + m.submodules += Instance("IFS1P3DX", + i_SCLK=clk, + i_SP=Const(1), + i_CD=Const(0), + i_D=d[bit], + o_Q=q[bit] + ) + + def get_oreg(clk, d, q): + for bit in range(len(q)): + m.submodules += Instance("OFS1P3DX", + i_SCLK=clk, + i_SP=Const(1), + i_CD=Const(0), + i_D=d[bit], + o_Q=q[bit] + ) + + def get_iddr(sclk, d, q0, q1): + for bit in range(len(d)): + m.submodules += Instance("IDDRXE", + i_SCLK=sclk, + i_RST=Const(0), + i_D=d[bit], + o_Q0=q0[bit], o_Q1=q1[bit] + ) + + def get_oddr(sclk, d0, d1, q): + for bit in range(len(q)): + m.submodules += Instance("ODDRXE", + i_SCLK=sclk, + i_RST=Const(0), + i_D0=d0[bit], i_D1=d1[bit], + o_Q=q[bit] + ) + + def get_ineg(z, invert): + if invert: + a = Signal.like(z, name_suffix="_n") + m.d.comb += z.eq(~a) + return 
a + else: + return z + + def get_oneg(a, invert): + if invert: + z = Signal.like(a, name_suffix="_n") + m.d.comb += z.eq(~a) + return z + else: + return a + + if "i" in pin.dir: + if pin.xdr < 2: + pin_i = get_ineg(pin.i, i_invert) + elif pin.xdr == 2: + pin_i0 = get_ineg(pin.i0, i_invert) + pin_i1 = get_ineg(pin.i1, i_invert) + if "o" in pin.dir: + if pin.xdr < 2: + pin_o = get_oneg(pin.o, o_invert) + elif pin.xdr == 2: + pin_o0 = get_oneg(pin.o0, o_invert) + pin_o1 = get_oneg(pin.o1, o_invert) + + i = o = t = None + if "i" in pin.dir: + i = Signal(pin.width, name="{}_xdr_i".format(pin.name)) + if "o" in pin.dir: + o = Signal(pin.width, name="{}_xdr_o".format(pin.name)) + if pin.dir in ("oe", "io"): + t = Signal(1, name="{}_xdr_t".format(pin.name)) + + if pin.xdr == 0: + if "i" in pin.dir: + i = pin_i + if "o" in pin.dir: + o = pin_o + if pin.dir in ("oe", "io"): + t = ~pin.oe + elif pin.xdr == 1: + # Note that currently nextpnr will not pack an FF (*FS1P3DX) into the PIO. + if "i" in pin.dir: + get_ireg(pin.i_clk, i, pin_i) + if "o" in pin.dir: + get_oreg(pin.o_clk, pin_o, o) + if pin.dir in ("oe", "io"): + get_oreg(pin.o_clk, ~pin.oe, t) + elif pin.xdr == 2: + if "i" in pin.dir: + get_iddr(pin.i_clk, i, pin_i0, pin_i1) + if "o" in pin.dir: + get_oddr(pin.o_clk, pin_o0, pin_o1, o) + if pin.dir in ("oe", "io"): + # It looks like Diamond will not pack an OREG as a tristate register in a DDR PIO. + # It is not clear what is the recommended set of primitives for this task. + # Similarly, nextpnr will not pack anything as a tristate register in a DDR PIO. 
+ get_oreg(pin.o_clk, ~pin.oe, t) + else: + assert False + + return (i, o, t) + + def get_input(self, pin, port, attrs, invert): + self._check_feature("single-ended input", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert) + for bit in range(len(port)): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("IB", + i_I=port.io[bit], + o_O=i[bit] + ) + return m + + def get_output(self, pin, port, attrs, invert): + self._check_feature("single-ended output", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert) + for bit in range(len(port)): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("OB", + i_I=o[bit], + o_O=port.io[bit] + ) + return m + + def get_tristate(self, pin, port, attrs, invert): + self._check_feature("single-ended tristate", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert) + for bit in range(len(port)): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBZ", + i_T=t, + i_I=o[bit], + o_O=port.io[bit] + ) + return m + + def get_input_output(self, pin, port, attrs, invert): + self._check_feature("single-ended input/output", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert, o_invert=invert) + for bit in range(len(port)): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("BB", + i_T=t, + i_I=o[bit], + o_O=i[bit], + io_B=port.io[bit] + ) + return m + + def get_diff_input(self, pin, port, attrs, invert): + self._check_feature("differential input", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("IB", + i_I=port.p[bit], + o_O=i[bit] + ) + return m + + def 
get_diff_output(self, pin, port, attrs, invert): + self._check_feature("differential output", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("OB", + i_I=o[bit], + o_O=port.p[bit], + ) + return m + + def get_diff_tristate(self, pin, port, attrs, invert): + self._check_feature("differential tristate", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBZ", + i_T=t, + i_I=o[bit], + o_O=port.p[bit], + ) + return m + + def get_diff_input_output(self, pin, port, attrs, invert): + self._check_feature("differential input/output", pin, attrs, + valid_xdrs=(0, 1, 2), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert, o_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("BB", + i_T=t, + i_I=o[bit], + o_O=i[bit], + io_B=port.p[bit], + ) + return m + + # CDC primitives are not currently specialized for MachXO2/MachXO3L. 
+ + +LatticeMachXO2Platform = LatticeMachXO2Or3LPlatform +LatticeMachXO3LPlatform = LatticeMachXO2Or3LPlatform diff --git a/amaranth/vendor/quicklogic.py b/amaranth/vendor/quicklogic.py new file mode 100644 index 0000000..ebfbf46 --- /dev/null +++ b/amaranth/vendor/quicklogic.py @@ -0,0 +1,185 @@ +from abc import abstractproperty + +from ..hdl import * +from ..lib.cdc import ResetSynchronizer +from ..build import * + + +__all__ = ["QuicklogicPlatform"] + + +class QuicklogicPlatform(TemplatedPlatform): + """ + Symbiflow toolchain + ------------------- + + Required tools: + * ``symbiflow_synth`` + * ``symbiflow_pack`` + * ``symbiflow_place`` + * ``symbiflow_route`` + * ``symbiflow_write_fasm`` + * ``symbiflow_write_bitstream`` + + The environment is populated by running the script specified in the environment variable + ``AMARANTH_ENV_QLSymbiflow``, if present. + + Available overrides: + * ``add_constraints``: inserts commands in XDC file. + """ + + device = abstractproperty() + package = abstractproperty() + + # Since the QuickLogic version of SymbiFlow toolchain is not upstreamed yet + # we should distinguish the QuickLogic version from mainline one. 
+ # QuickLogic toolchain: https://github.com/QuickLogic-Corp/quicklogic-fpga-toolchain/releases + toolchain = "QLSymbiflow" + + required_tools = [ + "symbiflow_synth", + "symbiflow_pack", + "symbiflow_place", + "symbiflow_route", + "symbiflow_write_fasm", + "symbiflow_write_bitstream", + "symbiflow_write_openocd", + ] + file_templates = { + **TemplatedPlatform.build_script_templates, + "{{name}}.v": r""" + /* {{autogenerated}} */ + {{emit_verilog()}} + """, + "{{name}}.debug.v": r""" + /* {{autogenerated}} */ + {{emit_debug_verilog()}} + """, + "{{name}}.pcf": r""" + # {{autogenerated}} + {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} + set_io {{port_name}} {{pin_name}} + {% endfor %} + """, + "{{name}}.xdc": r""" + # {{autogenerated}} + {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} + {% for attr_name, attr_value in attrs.items() -%} + set_property {{attr_name}} {{attr_value}} [get_ports {{port_name|tcl_escape}} }] + {% endfor %} + {% endfor %} + {{get_override("add_constraints")|default("# (add_constraints placeholder)")}} + """, + "{{name}}.sdc": r""" + # {{autogenerated}} + {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} + {% if port_signal is not none -%} + create_clock -period {{100000000/frequency}} {{port_signal.name|ascii_escape}} + {% endif %} + {% endfor %} + """ + } + command_templates = [ + r""" + {{invoke_tool("symbiflow_synth")}} + -t {{name}} + -v {% for file in platform.iter_files(".v", ".sv", ".vhd", ".vhdl") -%} {{file}} {% endfor %} {{name}}.v + -d {{platform.device}} + -p {{name}}.pcf + -P {{platform.package}} + -x {{name}}.xdc + """, + r""" + {{invoke_tool("symbiflow_pack")}} + -e {{name}}.eblif + -d {{platform.device}} + -s {{name}}.sdc + """, + r""" + {{invoke_tool("symbiflow_place")}} + -e {{name}}.eblif + -d {{platform.device}} + -p {{name}}.pcf + -n {{name}}.net + -P {{platform.package}} + -s {{name}}.sdc + """, + r""" + 
{{invoke_tool("symbiflow_route")}} + -e {{name}}.eblif + -d {{platform.device}} + -s {{name}}.sdc + """, + r""" + {{invoke_tool("symbiflow_write_fasm")}} + -e {{name}}.eblif + -d {{platform.device}} + -s {{name}}.sdc + """, + r""" + {{invoke_tool("symbiflow_write_bitstream")}} + -f {{name}}.fasm + -d {{platform.device}} + -P {{platform.package}} + -b {{name}}.bit + """, + # This should be `invoke_tool("symbiflow_write_openocd")`, but isn't because of a bug in + # the QLSymbiflow v1.3.0 toolchain release. + r""" + python3 -m quicklogic_fasm.bitstream_to_openocd + {{name}}.bit + {{name}}.openocd + --osc-freq {{platform.osc_freq}} + --fpga-clk-divider {{platform.osc_div}} + """, + ] + + # Common logic + + @property + def default_clk_constraint(self): + if self.default_clk == "sys_clk0": + return Clock(self.osc_freq / self.osc_div) + return super().default_clk_constraint + + def add_clock_constraint(self, clock, frequency): + super().add_clock_constraint(clock, frequency) + clock.attrs["keep"] = "TRUE" + + def create_missing_domain(self, name): + if name == "sync" and self.default_clk is not None: + m = Module() + if self.default_clk == "sys_clk0": + if not hasattr(self, "osc_div"): + raise ValueError("OSC divider (osc_div) must be an integer between 2 " + "and 512") + if not isinstance(self.osc_div, int) or self.osc_div < 2 or self.osc_div > 512: + raise ValueError("OSC divider (osc_div) must be an integer between 2 " + "and 512, not {!r}" + .format(self.osc_div)) + if not hasattr(self, "osc_freq"): + raise ValueError("OSC frequency (osc_freq) must be an integer between 2100000 " + "and 80000000") + if not isinstance(self.osc_freq, int) or self.osc_freq < 2100000 or self.osc_freq > 80000000: + raise ValueError("OSC frequency (osc_freq) must be an integer between 2100000 " + "and 80000000, not {!r}" + .format(self.osc_freq)) + clk_i = Signal() + sys_clk0 = Signal() + m.submodules += Instance("qlal4s3b_cell_macro", + o_Sys_Clk0=sys_clk0) + m.submodules += 
Instance("gclkbuff", + o_A=sys_clk0, + o_Z=clk_i) + else: + clk_i = self.request(self.default_clk).i + + if self.default_rst is not None: + rst_i = self.request(self.default_rst).i + else: + rst_i = Const(0) + + m.domains += ClockDomain("sync") + m.d.comb += ClockSignal("sync").eq(clk_i) + m.submodules.reset_sync = ResetSynchronizer(rst_i, domain="sync") + return m diff --git a/amaranth/vendor/xilinx.py b/amaranth/vendor/xilinx.py new file mode 100644 index 0000000..96632a8 --- /dev/null +++ b/amaranth/vendor/xilinx.py @@ -0,0 +1,1060 @@ +from abc import abstractproperty + +from ..hdl import * +from ..lib.cdc import ResetSynchronizer +from ..build import * + + +__all__ = ["XilinxPlatform"] + + +class XilinxPlatform(TemplatedPlatform): + """ + Vivado toolchain + ---------------- + + Required tools: + * ``vivado`` + + The environment is populated by running the script specified in the environment variable + ``AMARANTH_ENV_Vivado``, if present. + + Available overrides: + * ``script_after_read``: inserts commands after ``read_xdc`` in Tcl script. + * ``script_after_synth``: inserts commands after ``synth_design`` in Tcl script. + * ``script_after_place``: inserts commands after ``place_design`` in Tcl script. + * ``script_after_route``: inserts commands after ``route_design`` in Tcl script. + * ``script_before_bitstream``: inserts commands before ``write_bitstream`` in Tcl script. + * ``script_after_bitstream``: inserts commands after ``write_bitstream`` in Tcl script. + * ``add_constraints``: inserts commands in XDC file. + * ``vivado_opts``: adds extra options for ``vivado``. + + Build products: + * ``{{name}}.log``: Vivado log. + * ``{{name}}_timing_synth.rpt``: Vivado report. + * ``{{name}}_utilization_hierarchical_synth.rpt``: Vivado report. + * ``{{name}}_utilization_synth.rpt``: Vivado report. + * ``{{name}}_utilization_hierarchical_place.rpt``: Vivado report. + * ``{{name}}_utilization_place.rpt``: Vivado report. + * ``{{name}}_io.rpt``: Vivado report. 
+ * ``{{name}}_control_sets.rpt``: Vivado report. + * ``{{name}}_clock_utilization.rpt``: Vivado report. + * ``{{name}}_route_status.rpt``: Vivado report. + * ``{{name}}_drc.rpt``: Vivado report. + * ``{{name}}_methodology.rpt``: Vivado report. + * ``{{name}}_timing.rpt``: Vivado report. + * ``{{name}}_power.rpt``: Vivado report. + * ``{{name}}_route.dcp``: Vivado design checkpoint. + * ``{{name}}.bit``: binary bitstream with metadata. + * ``{{name}}.bin``: binary bitstream. + + ISE toolchain + ------------- + + Required tools: + * ``xst`` + * ``ngdbuild`` + * ``map`` + * ``par`` + * ``bitgen`` + + The environment is populated by running the script specified in the environment variable + ``AMARANTH_ENV_ISE``, if present. + + Available overrides: + * ``script_after_run``: inserts commands after ``run`` in XST script. + * ``add_constraints``: inserts commands in UCF file. + * ``xst_opts``: adds extra options for ``xst``. + * ``ngdbuild_opts``: adds extra options for ``ngdbuild``. + * ``map_opts``: adds extra options for ``map``. + * ``par_opts``: adds extra options for ``par``. + * ``bitgen_opts``: adds extra and overrides default options for ``bitgen``; + default options: ``-g Compress``. + + Build products: + * ``{{name}}.srp``: synthesis report. + * ``{{name}}.ngc``: synthesized RTL. + * ``{{name}}.bld``: NGDBuild log. + * ``{{name}}.ngd``: design database. + * ``{{name}}_map.map``: MAP log. + * ``{{name}}_map.mrp``: mapping report. + * ``{{name}}_map.ncd``: mapped netlist. + * ``{{name}}.pcf``: physical constraints. + * ``{{name}}_par.par``: PAR log. + * ``{{name}}_par_pad.txt``: I/O usage report. + * ``{{name}}_par.ncd``: place and routed netlist. + * ``{{name}}.drc``: DRC report. + * ``{{name}}.bgn``: BitGen log. + * ``{{name}}.bit``: binary bitstream with metadata. + * ``{{name}}.bin``: raw binary bitstream. 
+ + Symbiflow toolchain + ------------------- + + Required tools: + * ``symbiflow_synth`` + * ``symbiflow_pack`` + * ``symbiflow_place`` + * ``symbiflow_route`` + * ``symbiflow_write_fasm`` + * ``symbiflow_write_bitstream`` + + The environment is populated by running the script specified in the environment variable + ``AMARANTH_ENV_Symbiflow``, if present. + + Available overrides: + * ``add_constraints``: inserts commands in XDC file. + """ + + toolchain = None # selected when creating platform + + device = abstractproperty() + package = abstractproperty() + speed = abstractproperty() + + @property + def _part(self): + if self.family in {"ultrascale", "ultrascaleplus"}: + return "{}-{}-{}".format(self.device, self.package, self.speed) + else: + return "{}{}-{}".format(self.device, self.package, self.speed) + + # Vivado templates + + _vivado_required_tools = ["vivado"] + _vivado_file_templates = { + **TemplatedPlatform.build_script_templates, + "build_{{name}}.sh": r""" + # {{autogenerated}} + set -e{{verbose("x")}} + if [ -z "$BASH" ] ; then exec /bin/bash "$0" "$@"; fi + [ -n "${{platform._toolchain_env_var}}" ] && . 
"${{platform._toolchain_env_var}}" + {{emit_commands("sh")}} + """, + "{{name}}.v": r""" + /* {{autogenerated}} */ + {{emit_verilog()}} + """, + "{{name}}.debug.v": r""" + /* {{autogenerated}} */ + {{emit_debug_verilog()}} + """, + "{{name}}.tcl": r""" + # {{autogenerated}} + create_project -force -name {{name}} -part {{platform._part}} + {% for file in platform.iter_files(".v", ".sv", ".vhd", ".vhdl") -%} + add_files {{file|tcl_escape}} + {% endfor %} + add_files {{name}}.v + read_xdc {{name}}.xdc + {% for file in platform.iter_files(".xdc") -%} + read_xdc {{file|tcl_escape}} + {% endfor %} + {{get_override("script_after_read")|default("# (script_after_read placeholder)")}} + synth_design -top {{name}} + foreach cell [get_cells -quiet -hier -filter {amaranth.vivado.false_path == "TRUE"}] { + set_false_path -to $cell + } + foreach cell [get_cells -quiet -hier -filter {amaranth.vivado.max_delay != ""}] { + set clock [get_clocks -of_objects \ + [all_fanin -flat -startpoints_only [get_pin $cell/D]]] + if {[llength $clock] != 0} { + set_max_delay -datapath_only -from $clock \ + -to [get_cells $cell] [get_property amaranth.vivado.max_delay $cell] + } + } + {{get_override("script_after_synth")|default("# (script_after_synth placeholder)")}} + report_timing_summary -file {{name}}_timing_synth.rpt + report_utilization -hierarchical -file {{name}}_utilization_hierarchical_synth.rpt + report_utilization -file {{name}}_utilization_synth.rpt + opt_design + place_design + {{get_override("script_after_place")|default("# (script_after_place placeholder)")}} + report_utilization -hierarchical -file {{name}}_utilization_hierarchical_place.rpt + report_utilization -file {{name}}_utilization_place.rpt + report_io -file {{name}}_io.rpt + report_control_sets -verbose -file {{name}}_control_sets.rpt + report_clock_utilization -file {{name}}_clock_utilization.rpt + route_design + {{get_override("script_after_route")|default("# (script_after_route placeholder)")}} + phys_opt_design + 
report_timing_summary -no_header -no_detailed_paths + write_checkpoint -force {{name}}_route.dcp + report_route_status -file {{name}}_route_status.rpt + report_drc -file {{name}}_drc.rpt + report_methodology -file {{name}}_methodology.rpt + report_timing_summary -datasheet -max_paths 10 -file {{name}}_timing.rpt + report_power -file {{name}}_power.rpt + {{get_override("script_before_bitstream")|default("# (script_before_bitstream placeholder)")}} + write_bitstream -force -bin_file {{name}}.bit + {{get_override("script_after_bitstream")|default("# (script_after_bitstream placeholder)")}} + quit + """, + "{{name}}.xdc": r""" + # {{autogenerated}} + {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} + set_property LOC {{pin_name}} [get_ports {{port_name|tcl_escape}}] + {% for attr_name, attr_value in attrs.items() -%} + set_property {{attr_name}} {{attr_value|tcl_escape}} [get_ports {{port_name|tcl_escape}}] + {% endfor %} + {% endfor %} + {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} + {% if port_signal is not none -%} + create_clock -name {{port_signal.name|ascii_escape}} -period {{1000000000/frequency}} [get_ports {{port_signal.name|tcl_escape}}] + {% else -%} + create_clock -name {{net_signal.name|ascii_escape}} -period {{1000000000/frequency}} [get_nets {{net_signal|hierarchy("/")|tcl_escape}}] + {% endif %} + {% endfor %} + {{get_override("add_constraints")|default("# (add_constraints placeholder)")}} + """ + } + _vivado_command_templates = [ + r""" + {{invoke_tool("vivado")}} + {{verbose("-verbose")}} + {{get_override("vivado_opts")|options}} + -mode batch + -log {{name}}.log + -source {{name}}.tcl + """ + ] + + # ISE toolchain + + _ise_required_tools = [ + "xst", + "ngdbuild", + "map", + "par", + "bitgen", + ] + _ise_file_templates = { + **TemplatedPlatform.build_script_templates, + "build_{{name}}.sh": r""" + # {{autogenerated}} + set -e{{verbose("x")}} + if [ -z "$BASH" ] ; then exec /bin/bash 
"$0" "$@"; fi + [ -n "${{platform._toolchain_env_var}}" ] && . "${{platform._toolchain_env_var}}" + {{emit_commands("sh")}} + """, + "{{name}}.v": r""" + /* {{autogenerated}} */ + {{emit_verilog()}} + """, + "{{name}}.debug.v": r""" + /* {{autogenerated}} */ + {{emit_debug_verilog()}} + """, + "{{name}}.prj": r""" + # {{autogenerated}} + {% for file in platform.iter_files(".vhd", ".vhdl") -%} + vhdl work {{file}} + {% endfor %} + {% for file in platform.iter_files(".v") -%} + verilog work {{file}} + {% endfor %} + verilog work {{name}}.v + """, + "{{name}}.xst": r""" + # {{autogenerated}} + run + -ifn {{name}}.prj + -ofn {{name}}.ngc + -top {{name}} + -use_new_parser yes + -p {{platform.device}}{{platform.package}}-{{platform.speed}} + {{get_override("script_after_run")|default("# (script_after_run placeholder)")}} + """, + "{{name}}.ucf": r""" + # {{autogenerated}} + {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} + {% set port_name = port_name|replace("[", "<")|replace("]", ">") -%} + NET "{{port_name}}" LOC={{pin_name}}; + {% for attr_name, attr_value in attrs.items() -%} + NET "{{port_name}}" {{attr_name}}={{attr_value}}; + {% endfor %} + {% endfor %} + {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} + NET "{{net_signal|hierarchy("/")}}" TNM_NET="PRD{{net_signal|hierarchy("/")}}"; + TIMESPEC "TS{{net_signal|hierarchy("__")}}"=PERIOD "PRD{{net_signal|hierarchy("/")}}" {{1000000000/frequency}} ns HIGH 50%; + {% endfor %} + {{get_override("add_constraints")|default("# (add_constraints placeholder)")}} + """ + } + _ise_command_templates = [ + r""" + {{invoke_tool("xst")}} + {{get_override("xst_opts")|options}} + -ifn {{name}}.xst + """, + r""" + {{invoke_tool("ngdbuild")}} + {{quiet("-quiet")}} + {{verbose("-verbose")}} + {{get_override("ngdbuild_opts")|options}} + -uc {{name}}.ucf + {{name}}.ngc + """, + r""" + {{invoke_tool("map")}} + {{verbose("-detail")}} + 
{{get_override("map_opts")|default([])|options}} + -w + -o {{name}}_map.ncd + {{name}}.ngd + {{name}}.pcf + """, + r""" + {{invoke_tool("par")}} + {{get_override("par_opts")|default([])|options}} + -w + {{name}}_map.ncd + {{name}}_par.ncd + {{name}}.pcf + """, + r""" + {{invoke_tool("bitgen")}} + {{get_override("bitgen_opts")|default(["-g Compress"])|options}} + -w + -g Binary:Yes + {{name}}_par.ncd + {{name}}.bit + """ + ] + + # Symbiflow templates + + _symbiflow_part_map = { + "xc7a35ticsg324-1L": "xc7a35tcsg324-1", # Arty-A7 + } + + _symbiflow_required_tools = [ + "symbiflow_synth", + "symbiflow_pack", + "symbiflow_place", + "symbiflow_route", + "symbiflow_write_fasm", + "symbiflow_write_bitstream" + ] + _symbiflow_file_templates = { + **TemplatedPlatform.build_script_templates, + "{{name}}.v": r""" + /* {{autogenerated}} */ + {{emit_verilog()}} + """, + "{{name}}.debug.v": r""" + /* {{autogenerated}} */ + {{emit_debug_verilog()}} + """, + "{{name}}.pcf": r""" + # {{autogenerated}} + {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} + set_io {{port_name}} {{pin_name}} + {% endfor %} + """, + "{{name}}.xdc": r""" + # {{autogenerated}} + {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} + {% for attr_name, attr_value in attrs.items() -%} + set_property {{attr_name}} {{attr_value}} [get_ports {{port_name|tcl_escape}} }] + {% endfor %} + {% endfor %} + {{get_override("add_constraints")|default("# (add_constraints placeholder)")}} + """, + "{{name}}.sdc": r""" + # {{autogenerated}} + {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} + {% if port_signal is none -%} + create_clock -period {{1000000000/frequency}} {{net_signal.name|ascii_escape}} + {% endif %} + {% endfor %} + """ + } + _symbiflow_command_templates = [ + r""" + {{invoke_tool("symbiflow_synth")}} + -t {{name}} + -v {% for file in platform.iter_files(".v", ".sv", ".vhd", ".vhdl") -%} {{file}} {% endfor %} {{name}}.v 
+ -p {{platform._symbiflow_part_map.get(platform._part, platform._part)}} + -x {{name}}.xdc + """, + r""" + {{invoke_tool("symbiflow_pack")}} + -e {{name}}.eblif + -P {{platform._symbiflow_part_map.get(platform._part, platform._part)}} + -s {{name}}.sdc + """, + r""" + {{invoke_tool("symbiflow_place")}} + -e {{name}}.eblif + -p {{name}}.pcf + -n {{name}}.net + -P {{platform._symbiflow_part_map.get(platform._part, platform._part)}} + -s {{name}}.sdc + """, + r""" + {{invoke_tool("symbiflow_route")}} + -e {{name}}.eblif + -P {{platform._symbiflow_part_map.get(platform._part, platform._part)}} + -s {{name}}.sdc + """, + r""" + {{invoke_tool("symbiflow_write_fasm")}} + -e {{name}}.eblif + -P {{platform._symbiflow_part_map.get(platform._part, platform._part)}} + """, + r""" + {{invoke_tool("symbiflow_write_bitstream")}} + -f {{name}}.fasm + -p {{platform._symbiflow_part_map.get(platform._part, platform._part)}} + -b {{name}}.bit + """ + ] + + # Common logic + + def __init__(self, *, toolchain=None): + super().__init__() + + # Determine device family. + device = self.device.lower() + # Remove the prefix. + if device.startswith("xc"): + device = device[2:] + elif device.startswith("xa"): + device = device[2:] + elif device.startswith("xqr"): + device = device[3:] + elif device.startswith("xq"): + device = device[2:] + else: + raise ValueError("Device '{}' is not recognized".format(self.device)) + # Do actual name matching. 
+ if device.startswith("2vp"): + self.family = "virtex2p" + elif device.startswith("2v"): + self.family = "virtex2" + elif device.startswith("3sd"): + self.family = "spartan3adsp" + elif device.startswith("3s"): + if device.endswith("a"): + self.family = "spartan3a" + elif device.endswith("e"): + self.family = "spartan3e" + else: + self.family = "spartan3" + elif device.startswith("4v"): + self.family = "virtex4" + elif device.startswith("5v"): + self.family = "virtex5" + elif device.startswith("6v"): + self.family = "virtex6" + elif device.startswith("6s"): + self.family = "spartan6" + elif device.startswith("7"): + self.family = "series7" + elif device.startswith(("vu", "ku")): + if device.endswith("p"): + self.family = "ultrascaleplus" + else: + self.family = "ultrascale" + elif device.startswith(("zu", "u", "k26")): + self.family = "ultrascaleplus" + elif device.startswith(("v", "2s")): + # Match last to avoid conflict with ultrascale. + # Yes, Spartan 2 is the same thing as Virtex. 
+ if device.endswith("e"): + self.family = "virtexe" + else: + self.family = "virtex" + + + ISE_FAMILIES = { + "virtex", "virtexe", + "virtex2", "virtex2p", + "spartan3", "spartan3e", "spartan3a", "spartan3adsp", + "virtex4", + "virtex5", + "virtex6", + "spartan6", + } + if toolchain is None: + if self.family in ISE_FAMILIES: + toolchain = "ISE" + else: + toolchain = "Vivado" + + assert toolchain in ("Vivado", "ISE", "Symbiflow") + if toolchain == "Vivado": + if self.family in ISE_FAMILIES: + raise ValueError("Family '{}' is not supported by the Vivado toolchain, please use ISE instead".format(self.family)) + elif toolchain == "ISE": + if self.family not in ISE_FAMILIES and self.family != "series7": + raise ValueError("Family '{}' is not supported by the ISE toolchain, please use Vivado instead".format(self.family)) + elif toolchain == "Symbiflow": + if self.family != "series7": + raise ValueError("Family '{}' is not supported by the Symbiflow toolchain".format(self.family)) + self.toolchain = toolchain + + @property + def required_tools(self): + if self.toolchain == "Vivado": + return self._vivado_required_tools + if self.toolchain == "ISE": + return self._ise_required_tools + if self.toolchain == "Symbiflow": + return self._symbiflow_required_tools + assert False + + @property + def file_templates(self): + if self.toolchain == "Vivado": + return self._vivado_file_templates + if self.toolchain == "ISE": + return self._ise_file_templates + if self.toolchain == "Symbiflow": + return self._symbiflow_file_templates + assert False + + @property + def command_templates(self): + if self.toolchain == "Vivado": + return self._vivado_command_templates + if self.toolchain == "ISE": + return self._ise_command_templates + if self.toolchain == "Symbiflow": + return self._symbiflow_command_templates + assert False + + def create_missing_domain(self, name): + # Xilinx devices have a global write enable (GWE) signal that asserted during configuraiton + # and deasserted once it 
ends. Because it is an asynchronous signal (GWE is driven by logic + # syncronous to configuration clock, which is not used by most designs), even though it is + # a low-skew global network, its deassertion may violate a setup/hold constraint with + # relation to a user clock. The recommended solution is to use a BUFGCE driven by the EOS + # signal (if available). For details, see: + # * https://www.xilinx.com/support/answers/44174.html + # * https://www.xilinx.com/support/documentation/white_papers/wp272.pdf + + STARTUP_PRIMITIVE = { + "spartan6": "STARTUP_SPARTAN6", + "virtex4": "STARTUP_VIRTEX4", + "virtex5": "STARTUP_VIRTEX5", + "virtex6": "STARTUP_VIRTEX6", + "series7": "STARTUPE2", + "ultrascale": "STARTUPE3", + "ultrascaleplus": "STARTUPE3", + } + + if self.family not in STARTUP_PRIMITIVE or self.toolchain == "Symbiflow": + # Spartan 3 and before lacks a STARTUP primitive with EOS output; use a simple ResetSynchronizer + # in that case, as is the default. + # Symbiflow does not support the STARTUPE2 primitive. + return super().create_missing_domain(name) + + if name == "sync" and self.default_clk is not None: + clk_i = self.request(self.default_clk).i + if self.default_rst is not None: + rst_i = self.request(self.default_rst).i + + m = Module() + ready = Signal() + m.submodules += Instance(STARTUP_PRIMITIVE[self.family], o_EOS=ready) + m.domains += ClockDomain("sync", reset_less=self.default_rst is None) + if self.toolchain != "Vivado": + m.submodules += Instance("BUFGCE", i_CE=ready, i_I=clk_i, o_O=ClockSignal("sync")) + elif self.family == "series7": + # Actually use BUFGCTRL configured as BUFGCE, since using BUFGCE causes + # sim/synth mismatches with Vivado 2019.2, and the suggested workaround + # (SIM_DEVICE parameter) breaks Vivado 2017.4. 
+ m.submodules += Instance("BUFGCTRL", + p_SIM_DEVICE="7SERIES", + i_I0=clk_i, i_S0=C(1, 1), i_CE0=ready, i_IGNORE0=C(0, 1), + i_I1=C(1, 1), i_S1=C(0, 1), i_CE1=C(0, 1), i_IGNORE1=C(1, 1), + o_O=ClockSignal("sync") + ) + else: + m.submodules += Instance("BUFGCE", + p_SIM_DEVICE="ULTRASCALE", + i_CE=ready, + i_I=clk_i, + o_O=ClockSignal("sync") + ) + if self.default_rst is not None: + m.submodules.reset_sync = ResetSynchronizer(rst_i, domain="sync") + return m + + def add_clock_constraint(self, clock, frequency): + super().add_clock_constraint(clock, frequency) + clock.attrs["keep"] = "TRUE" + + def _get_xdr_buffer(self, m, pin, iostd, *, i_invert=False, o_invert=False): + XFDDR_FAMILIES = { + "virtex2", + "virtex2p", + "spartan3", + } + XDDR2_FAMILIES = { + "spartan3e", + "spartan3a", + "spartan3adsp", + "spartan6", + } + XDDR_FAMILIES = { + "virtex4", + "virtex5", + "virtex6", + "series7", + } + XDDRE1_FAMILIES = { + "ultrascale", + "ultrascaleplus", + } + + def get_iob_dff(clk, d, q): + # SDR I/O is performed by packing a flip-flop into the pad IOB. 
+ for bit in range(len(q)): + m.submodules += Instance("FDCE", + a_IOB="TRUE", + i_C=clk, + i_CE=Const(1), + i_CLR=Const(0), + i_D=d[bit], + o_Q=q[bit] + ) + + def get_dff(clk, d, q): + for bit in range(len(q)): + m.submodules += Instance("FDCE", + i_C=clk, + i_CE=Const(1), + i_CLR=Const(0), + i_D=d[bit], + o_Q=q[bit] + ) + + def get_ifddr(clk, io, q0, q1): + assert self.family in XFDDR_FAMILIES + for bit in range(len(q0)): + m.submodules += Instance("IFDDRCPE", + i_C0=clk, i_C1=~clk, + i_CE=Const(1), + i_CLR=Const(0), i_PRE=Const(0), + i_D=io[bit], + o_Q0=q0[bit], o_Q1=q1[bit] + ) + + def get_iddr2(clk, d, q0, q1, alignment): + assert self.family in XDDR2_FAMILIES + for bit in range(len(q0)): + m.submodules += Instance("IDDR2", + p_DDR_ALIGNMENT=alignment, + p_SRTYPE="ASYNC", + p_INIT_Q0=C(0, 1), p_INIT_Q1=C(0, 1), + i_C0=clk, i_C1=~clk, + i_CE=Const(1), + i_S=Const(0), i_R=Const(0), + i_D=d[bit], + o_Q0=q0[bit], o_Q1=q1[bit] + ) + + def get_iddr(clk, d, q1, q2): + assert self.family in XDDR_FAMILIES or self.family in XDDRE1_FAMILIES + for bit in range(len(q1)): + if self.family in XDDR_FAMILIES: + m.submodules += Instance("IDDR", + p_DDR_CLK_EDGE="SAME_EDGE_PIPELINED", + p_SRTYPE="ASYNC", + p_INIT_Q1=C(0, 1), p_INIT_Q2=C(0, 1), + i_C=clk, + i_CE=Const(1), + i_S=Const(0), i_R=Const(0), + i_D=d[bit], + o_Q1=q1[bit], o_Q2=q2[bit] + ) + else: + m.submodules += Instance("IDDRE1", + p_DDR_CLK_EDGE="SAME_EDGE_PIPELINED", + p_IS_C_INVERTED=C(0, 1), p_IS_CB_INVERTED=C(1, 1), + i_C=clk, i_CB=clk, + i_R=Const(0), + i_D=d[bit], + o_Q1=q1[bit], o_Q2=q2[bit] + ) + + def get_fddr(clk, d0, d1, q): + for bit in range(len(q)): + if self.family in XFDDR_FAMILIES: + m.submodules += Instance("FDDRCPE", + i_C0=clk, i_C1=~clk, + i_CE=Const(1), + i_PRE=Const(0), i_CLR=Const(0), + i_D0=d0[bit], i_D1=d1[bit], + o_Q=q[bit] + ) + else: + m.submodules += Instance("ODDR2", + p_DDR_ALIGNMENT="NONE", + p_SRTYPE="ASYNC", + p_INIT=C(0, 1), + i_C0=clk, i_C1=~clk, + i_CE=Const(1), + i_S=Const(0), 
i_R=Const(0), + i_D0=d0[bit], i_D1=d1[bit], + o_Q=q[bit] + ) + + def get_oddr(clk, d1, d2, q): + for bit in range(len(q)): + if self.family in XDDR2_FAMILIES: + m.submodules += Instance("ODDR2", + p_DDR_ALIGNMENT="C0", + p_SRTYPE="ASYNC", + p_INIT=C(0, 1), + i_C0=clk, i_C1=~clk, + i_CE=Const(1), + i_S=Const(0), i_R=Const(0), + i_D0=d1[bit], i_D1=d2[bit], + o_Q=q[bit] + ) + elif self.family in XDDR_FAMILIES: + m.submodules += Instance("ODDR", + p_DDR_CLK_EDGE="SAME_EDGE", + p_SRTYPE="ASYNC", + p_INIT=C(0, 1), + i_C=clk, + i_CE=Const(1), + i_S=Const(0), i_R=Const(0), + i_D1=d1[bit], i_D2=d2[bit], + o_Q=q[bit] + ) + elif self.family in XDDRE1_FAMILIES: + m.submodules += Instance("ODDRE1", + p_SRVAL=C(0, 1), + i_C=clk, + i_SR=Const(0), + i_D1=d1[bit], i_D2=d2[bit], + o_Q=q[bit] + ) + + def get_ineg(y, invert): + if invert: + a = Signal.like(y, name_suffix="_n") + m.d.comb += y.eq(~a) + return a + else: + return y + + def get_oneg(a, invert): + if invert: + y = Signal.like(a, name_suffix="_n") + m.d.comb += y.eq(~a) + return y + else: + return a + + if "i" in pin.dir: + if pin.xdr < 2: + pin_i = get_ineg(pin.i, i_invert) + elif pin.xdr == 2: + pin_i0 = get_ineg(pin.i0, i_invert) + pin_i1 = get_ineg(pin.i1, i_invert) + if "o" in pin.dir: + if pin.xdr < 2: + pin_o = get_oneg(pin.o, o_invert) + elif pin.xdr == 2: + pin_o0 = get_oneg(pin.o0, o_invert) + pin_o1 = get_oneg(pin.o1, o_invert) + + i = o = t = None + if "i" in pin.dir: + i = Signal(pin.width, name="{}_xdr_i".format(pin.name)) + if "o" in pin.dir: + o = Signal(pin.width, name="{}_xdr_o".format(pin.name)) + if pin.dir in ("oe", "io"): + t = Signal(1, name="{}_xdr_t".format(pin.name)) + + if pin.xdr == 0: + if "i" in pin.dir: + i = pin_i + if "o" in pin.dir: + o = pin_o + if pin.dir in ("oe", "io"): + t = ~pin.oe + elif pin.xdr == 1: + if "i" in pin.dir: + get_iob_dff(pin.i_clk, i, pin_i) + if "o" in pin.dir: + get_iob_dff(pin.o_clk, pin_o, o) + if pin.dir in ("oe", "io"): + get_iob_dff(pin.o_clk, ~pin.oe, t) + elif 
pin.xdr == 2: + # On Spartan 3E/3A, the situation with DDR registers is messy: while the hardware + # supports same-edge alignment, it does so by borrowing the resources of the other + # pin in the differential pair (if any). Since we cannot be sure if the other pin + # is actually unused (or if the pin is even part of a differential pair in the first + # place), we only use the hardware alignment feature in two cases: + # + # - differential inputs (since the other pin's input registers will be unused) + # - true differential outputs (since they use only one pin's output registers, + # as opposed to pseudo-differential outputs that use both) + TRUE_DIFF_S3EA = { + "LVDS_33", "LVDS_25", + "MINI_LVDS_33", "MINI_LVDS_25", + "RSDS_33", "RSDS_25", + "PPDS_33", "PPDS_25", + "TMDS_33", + } + DIFF_S3EA = TRUE_DIFF_S3EA | { + "DIFF_HSTL_I", + "DIFF_HSTL_III", + "DIFF_HSTL_I_18", + "DIFF_HSTL_II_18", + "DIFF_HSTL_III_18", + "DIFF_SSTL3_I", + "DIFF_SSTL3_II", + "DIFF_SSTL2_I", + "DIFF_SSTL2_II", + "DIFF_SSTL18_I", + "DIFF_SSTL18_II", + "BLVDS_25", + } + if "i" in pin.dir: + if self.family in XFDDR_FAMILIES: + # First-generation input DDR register: basically just two FFs with opposite + # clocks. Add a register on both outputs, so that they enter fabric on + # the same clock edge, adding one cycle of latency. + i0_ff = Signal.like(pin_i0, name_suffix="_ff") + i1_ff = Signal.like(pin_i1, name_suffix="_ff") + get_dff(pin.i_clk, i0_ff, pin_i0) + get_dff(pin.i_clk, i1_ff, pin_i1) + get_iob_dff(pin.i_clk, i, i0_ff) + get_iob_dff(~pin.i_clk, i, i1_ff) + elif self.family in XDDR2_FAMILIES: + if self.family == 'spartan6' or iostd in DIFF_S3EA: + # Second-generation input DDR register: hw realigns i1 to positive clock edge, + # but also misaligns it with i0 input. Re-register first input before it + # enters fabric. This allows both inputs to enter fabric on the same clock + # edge, and adds one cycle of latency. 
+ i0_ff = Signal.like(pin_i0, name_suffix="_ff") + get_dff(pin.i_clk, i0_ff, pin_i0) + get_iddr2(pin.i_clk, i, i0_ff, pin_i1, "C0") + else: + # No extra register available for hw alignment, use extra registers. + i0_ff = Signal.like(pin_i0, name_suffix="_ff") + i1_ff = Signal.like(pin_i1, name_suffix="_ff") + get_dff(pin.i_clk, i0_ff, pin_i0) + get_dff(pin.i_clk, i1_ff, pin_i1) + get_iddr2(pin.i_clk, i, i0_ff, i1_ff, "NONE") + else: + # Third-generation input DDR register: does all of the above on its own. + get_iddr(pin.i_clk, i, pin_i0, pin_i1) + if "o" in pin.dir: + if self.family in XFDDR_FAMILIES or self.family == "spartan3e" or (self.family.startswith("spartan3a") and iostd not in TRUE_DIFF_S3EA): + # For this generation, we need to realign o1 input ourselves. + o1_ff = Signal.like(pin_o1, name_suffix="_ff") + get_dff(pin.o_clk, pin_o1, o1_ff) + get_fddr(pin.o_clk, pin_o0, o1_ff, o) + else: + get_oddr(pin.o_clk, pin_o0, pin_o1, o) + if pin.dir in ("oe", "io"): + if self.family == "spartan6": + get_oddr(pin.o_clk, ~pin.oe, ~pin.oe, t) + else: + get_iob_dff(pin.o_clk, ~pin.oe, t) + else: + assert False + + return (i, o, t) + + def _get_valid_xdrs(self): + if self.family in {"virtex", "virtexe"}: + return (0, 1) + else: + return (0, 1, 2) + + def get_input(self, pin, port, attrs, invert): + self._check_feature("single-ended input", pin, attrs, + valid_xdrs=self._get_valid_xdrs(), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, attrs.get("IOSTANDARD"), i_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("IBUF", + i_I=port.io[bit], + o_O=i[bit] + ) + return m + + def get_output(self, pin, port, attrs, invert): + self._check_feature("single-ended output", pin, attrs, + valid_xdrs=self._get_valid_xdrs(), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, attrs.get("IOSTANDARD"), o_invert=invert) + if self.toolchain != "Symbiflow": + for bit in range(pin.width): + 
m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBUF", + i_I=o[bit], + o_O=port.io[bit] + ) + else: + m.d.comb += port.eq(self._invert_if(invert, o)) + return m + + def get_tristate(self, pin, port, attrs, invert): + if self.toolchain == "Symbiflow": + return super().get_tristate(pin, port, attrs, invert) + + self._check_feature("single-ended tristate", pin, attrs, + valid_xdrs=self._get_valid_xdrs(), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, attrs.get("IOSTANDARD"), o_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBUFT", + i_T=t, + i_I=o[bit], + o_O=port.io[bit] + ) + return m + + def get_input_output(self, pin, port, attrs, invert): + if self.toolchain == "Symbiflow": + return super().get_input_output(pin, port, attrs, invert) + + self._check_feature("single-ended input/output", pin, attrs, + valid_xdrs=self._get_valid_xdrs(), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, attrs.get("IOSTANDARD"), i_invert=invert, o_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("IOBUF", + i_T=t, + i_I=o[bit], + o_O=i[bit], + io_IO=port.io[bit] + ) + return m + + def get_diff_input(self, pin, port, attrs, invert): + if self.toolchain == "Symbiflow": + return super().get_diff_input(pin, port, attrs, invert) + + self._check_feature("differential input", pin, attrs, + valid_xdrs=self._get_valid_xdrs(), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, attrs.get("IOSTANDARD", "LVDS_25"), i_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("IBUFDS", + i_I=port.p[bit], i_IB=port.n[bit], + o_O=i[bit] + ) + return m + + def get_diff_output(self, pin, port, attrs, invert): + if self.toolchain == "Symbiflow": + return super().get_diff_output(pin, port, attrs, invert) + + self._check_feature("differential output", pin, attrs, + 
valid_xdrs=self._get_valid_xdrs(), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, attrs.get("IOSTANDARD", "LVDS_25"), o_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBUFDS", + i_I=o[bit], + o_O=port.p[bit], o_OB=port.n[bit] + ) + return m + + def get_diff_tristate(self, pin, port, attrs, invert): + if self.toolchain == "Symbiflow": + return super().get_diff_tristate(pin, port, attrs, invert) + + self._check_feature("differential tristate", pin, attrs, + valid_xdrs=self._get_valid_xdrs(), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, attrs.get("IOSTANDARD", "LVDS_25"), o_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBUFTDS", + i_T=t, + i_I=o[bit], + o_O=port.p[bit], o_OB=port.n[bit] + ) + return m + + def get_diff_input_output(self, pin, port, attrs, invert): + if self.toolchain == "Symbiflow": + return super().get_diff_input_output(pin, port, attrs, invert) + + self._check_feature("differential input/output", pin, attrs, + valid_xdrs=self._get_valid_xdrs(), valid_attrs=True) + m = Module() + i, o, t = self._get_xdr_buffer(m, pin, attrs.get("IOSTANDARD", "LVDS_25"), i_invert=invert, o_invert=invert) + for bit in range(pin.width): + m.submodules["{}_{}".format(pin.name, bit)] = Instance("IOBUFDS", + i_T=t, + i_I=o[bit], + o_O=i[bit], + io_IO=port.p[bit], io_IOB=port.n[bit] + ) + return m + + # The synchronizer implementations below apply two separate but related timing constraints. + # + # First, the ASYNC_REG attribute prevents inference of shift registers from synchronizer FFs, + # and constraints the FFs to be placed as close as possible, ideally in one CLB. This attribute + # only affects the synchronizer FFs themselves. + # + # Second, for Vivado only, the amaranth.vivado.false_path or amaranth.vivado.max_delay attribute + # affects the path into the synchronizer. 
If maximum input delay is specified, a datapath-only + # maximum delay constraint is applied, limiting routing delay (and therefore skew) at + # the synchronizer input. Otherwise, a false path constraint is used to omit the input path + # from the timing analysis. + + def get_ff_sync(self, ff_sync): + m = Module() + flops = [Signal(ff_sync.i.shape(), name="stage{}".format(index), + reset=ff_sync._reset, reset_less=ff_sync._reset_less, + attrs={"ASYNC_REG": "TRUE"}) + for index in range(ff_sync._stages)] + if self.toolchain == "Vivado": + if ff_sync._max_input_delay is None: + flops[0].attrs["amaranth.vivado.false_path"] = "TRUE" + else: + flops[0].attrs["amaranth.vivado.max_delay"] = str(ff_sync._max_input_delay * 1e9) + elif ff_sync._max_input_delay is not None: + raise NotImplementedError("Platform '{}' does not support constraining input delay " + "for FFSynchronizer" + .format(type(self).__name__)) + for i, o in zip((ff_sync.i, *flops), flops): + m.d[ff_sync._o_domain] += o.eq(i) + m.d.comb += ff_sync.o.eq(flops[-1]) + return m + + + def get_async_ff_sync(self, async_ff_sync): + m = Module() + m.domains += ClockDomain("async_ff", async_reset=True, local=True) + flops = [Signal(1, name="stage{}".format(index), reset=1, + attrs={"ASYNC_REG": "TRUE"}) + for index in range(async_ff_sync._stages)] + if self.toolchain == "Vivado": + if async_ff_sync._max_input_delay is None: + flops[0].attrs["amaranth.vivado.false_path"] = "TRUE" + else: + flops[0].attrs["amaranth.vivado.max_delay"] = str(async_ff_sync._max_input_delay * 1e9) + elif async_ff_sync._max_input_delay is not None: + raise NotImplementedError("Platform '{}' does not support constraining input delay " + "for AsyncFFSynchronizer" + .format(type(self).__name__)) + for i, o in zip((0, *flops), flops): + m.d.async_ff += o.eq(i) + + if async_ff_sync._edge == "pos": + m.d.comb += ResetSignal("async_ff").eq(async_ff_sync.i) + else: + m.d.comb += ResetSignal("async_ff").eq(~async_ff_sync.i) + + m.d.comb += [ + 
ClockSignal("async_ff").eq(ClockSignal(async_ff_sync._o_domain)), + async_ff_sync.o.eq(flops[-1]) + ] + + return m diff --git a/amaranth/vendor/xilinx_7series.py b/amaranth/vendor/xilinx_7series.py new file mode 100644 index 0000000..59ee182 --- /dev/null +++ b/amaranth/vendor/xilinx_7series.py @@ -0,0 +1,15 @@ +import warnings + +from .xilinx import XilinxPlatform + + +__all__ = ["Xilinx7SeriesPlatform"] + + +Xilinx7SeriesPlatform = XilinxPlatform + + +# TODO(amaranth-0.4): remove +warnings.warn("instead of amaranth.vendor.xilinx_7series.Xilinx7SeriesPlatform, " + "use amaranth.vendor.xilinx.XilinxPlatform", + DeprecationWarning, stacklevel=2) diff --git a/amaranth/vendor/xilinx_spartan_3_6.py b/amaranth/vendor/xilinx_spartan_3_6.py new file mode 100644 index 0000000..fbed4a1 --- /dev/null +++ b/amaranth/vendor/xilinx_spartan_3_6.py @@ -0,0 +1,16 @@ +import warnings + +from .xilinx import XilinxPlatform + + +__all__ = ["XilinxSpartan3APlatform", "XilinxSpartan6Platform"] + + +XilinxSpartan3APlatform = XilinxPlatform +XilinxSpartan6Platform = XilinxPlatform + + +# TODO(amaranth-0.4): remove +warnings.warn("instead of amaranth.vendor.xilinx_spartan_3_6.XilinxSpartan3APlatform and " + ".XilinxSpartan6Platform, use amaranth.vendor.xilinx.XilinxPlatform", + DeprecationWarning, stacklevel=2) diff --git a/amaranth/vendor/xilinx_ultrascale.py b/amaranth/vendor/xilinx_ultrascale.py new file mode 100644 index 0000000..43fb2f2 --- /dev/null +++ b/amaranth/vendor/xilinx_ultrascale.py @@ -0,0 +1,15 @@ +import warnings + +from .xilinx import XilinxPlatform + + +__all__ = ["XilinxUltraScalePlatform"] + + +XilinxUltraScalePlatform = XilinxPlatform + + +# TODO(amaranth-0.4): remove +warnings.warn("instead of amaranth.vendor.xilinx_ultrascale.XilinxUltraScalePlatform, " + "use amaranth.vendor.xilinx.XilinxPlatform", + DeprecationWarning, stacklevel=2) diff --git a/docs/_code/led_blinker.py b/docs/_code/led_blinker.py index 8fef849..986be2a 100644 --- a/docs/_code/led_blinker.py +++ 
b/docs/_code/led_blinker.py @@ -1,4 +1,4 @@ -from nmigen import * +from amaranth import * class LEDBlinker(Elaboratable): @@ -18,7 +18,7 @@ class LEDBlinker(Elaboratable): return m # --- BUILD --- -from nmigen_boards.icestick import * +from amaranth_boards.icestick import * ICEStickPlatform().build(LEDBlinker(), do_program=True) diff --git a/docs/_code/up_counter.py b/docs/_code/up_counter.py index 6f49360..3ab1f4f 100644 --- a/docs/_code/up_counter.py +++ b/docs/_code/up_counter.py @@ -1,4 +1,4 @@ -from nmigen import * +from amaranth import * class UpCounter(Elaboratable): @@ -41,7 +41,7 @@ class UpCounter(Elaboratable): return m # --- TEST --- -from nmigen.sim import Simulator +from amaranth.sim import Simulator dut = UpCounter(25) @@ -71,7 +71,7 @@ sim.add_sync_process(bench) with sim.write_vcd("up_counter.vcd"): sim.run() # --- CONVERT --- -from nmigen.back import verilog +from amaranth.back import verilog top = UpCounter(25) diff --git a/docs/_code/up_counter.v b/docs/_code/up_counter.v index c8cde8e..8a5b330 100644 --- a/docs/_code/up_counter.v +++ b/docs/_code/up_counter.v @@ -1,8 +1,8 @@ -(* generator = "nMigen" *) +(* generator = "Amaranth" *) module top(clk, rst, en, ovf); - (* src = "/nmigen/hdl/ir.py:526" *) + (* src = "/amaranth/hdl/ir.py:526" *) input clk; - (* src = "/nmigen/hdl/ir.py:526" *) + (* src = "/amaranth/hdl/ir.py:526" *) input rst; (* src = "up_counter.py:26" *) input en; @@ -38,7 +38,7 @@ module top(clk, rst, en, ovf); \count$next = \$3 [15:0]; endcase endcase - (* src = "/nmigen/hdl/xfrm.py:518" *) + (* src = "/amaranth/hdl/xfrm.py:518" *) casez (rst) 1'h1: \count$next = 16'h0000; diff --git a/docs/_historical/COMPAT_SUMMARY.md b/docs/_historical/COMPAT_SUMMARY.md deleted file mode 100644 index 31d3ae2..0000000 --- a/docs/_historical/COMPAT_SUMMARY.md +++ /dev/null @@ -1,167 +0,0 @@ -Migen and nMigen compatibility summary -====================================== - -nMigen intends to provide as close to 100% compatibility to Migen as 
possible without compromising its other goals. However, Migen widely uses `*` imports, tends to expose implementation details, and in general does not have a well-defined interface. This document attempts to elucidate a well-defined Migen API surface (including, when necessary, private items that have been used downstream), and describes the intended nMigen replacements and their implementation status. - -API change legend: - - *id*: identical - - *obs*: removed or incompatibly changed with compatibility stub provided - - *obs →n*: removed or incompatibly changed with compatibility stub provided, use *n* instead - - *brk*: removed or incompatibly changed with no replacement provided - - *brk →n*: removed or incompatibly changed with no replacement provided, use *n* instead - - *→n*: renamed to *n* - - *⇒m*: merged into *m* - - *a=→b=*: parameter *a* renamed to *b* - - *a=∼*: parameter *a* removed - - *.a=→.b*: attribute *a* renamed to *b* - - *.a=∼*: attribute *a* removed - - *?*: no decision made yet - -When describing renames or replacements, `mod` refers to a 3rd-party package `mod` (no nMigen implementation provided), `.mod.item` refers to `nmigen.mod.item`, and "(import `.item`)" means that, while `item` is provided under `nmigen.mod.item`, it is aliased to, and should be imported from a shorter path for readability. - -Status legend: - - (−) No decision yet, or no replacement implemented - - (+) Implemented replacement (the API and/or compatibility shim are provided) - - (⊕) Verified replacement and/or compatibility shim (the compatibility shim is manually reviewed and/or has 100% test coverage) - - (⊙) No direct replacement or compatibility shim is provided - -Compatibility summary ---------------------- - - - (−) `fhdl` → `.hdl` - - (⊕) `bitcontainer` ⇒ `.tools` - - (⊕) `log2_int` id - - (⊕) `bits_for` id - - (⊕) `value_bits_sign` → `Value.shape` - - (⊕) `conv_output` **obs** - - (⊕) `decorators` ⇒ `.hdl.xfrm` -
Note: `transform_*` methods not considered part of public API. - - (⊙) `ModuleTransformer` **brk** - - (⊙) `ControlInserter` **brk** - - (⊕) `CEInserter` → `EnableInserter` - - (⊕) `ResetInserter` id - - (⊕) `ClockDomainsRenamer` → `DomainRenamer`, `cd_remapping=`→`domain_map=` - - (⊙) `edif` **brk** - - (⊕) `module` **obs** → `.hdl.dsl` -
Note: any class inheriting from `Module` in oMigen should inherit from `Elaboratable` in nMigen and use an nMigen `Module` in its `.elaborate()` method. - - (⊕) `FinalizeError` **obs** - - (⊕) `Module` **obs** → `.hdl.dsl.Module` - - (⊙) `namer` **brk** - - (⊙) `simplify` **brk** - - (⊕) `specials` **obs** - - (⊙) `Special` **brk** - - (⊕) `Tristate` **obs** - - (⊕) `TSTriple` **obs** → `.lib.io.Pin` - - (⊕) `Instance` → `.hdl.ir.Instance` - - (⊕) `Memory` id -
Note: nMigen memories should not be added as submodules. - - (⊕) `.get_port` **obs** → `.read_port()` + `.write_port()` - - (⊕) `_MemoryPort` **obs** → `.hdl.mem.ReadPort` + `.hdl.mem.WritePort` - - (⊕) `READ_FIRST`/`WRITE_FIRST` **obs** -
Note: `READ_FIRST` corresponds to `mem.read_port(transparent=False)`, and `WRITE_FIRST` to `mem.read_port(transparent=True)`. - - (⊙) `NO_CHANGE` **brk** -
Note: in designs using `NO_CHANGE`, replace it with logic implementing required semantics explicitly, or with a different mode. - - (⊕) `structure` → `.hdl.ast` - - (⊕) `DUID` id - - (⊕) `_Value` → `Value` -
Note: values no longer valid as keys in `dict` and `set`; use `ValueDict` and `ValueSet` instead. - - (⊕) `wrap` → `Value.cast` - - (⊕) `_Operator` → `Operator`, `op=`→`operator=`, `.op`→`.operator` - - (⊕) `Mux` id - - (⊕) `_Slice` → `Slice` id - - (⊕) `_Part` → `Part` id - - (⊕) `Cat` id, `.l`→`.parts` - - (⊕) `Replicate` → `Repl`, `v=`→`value=`, `n=`→`count=`, `.v`→`.value`, `.n`→`.count` - - (⊕) `Constant` → `Const`, `bits_sign=`→`shape=`, `.nbits`→`.width` - - (⊕) `Signal` id, `bits_sign=`→`shape=`, `attr=`→`attrs=`, `name_override=`∼, `related=`, `variable=`∼, `.nbits`→`.width` - - (⊕) `ClockSignal` id, `cd=`→`domain=`, `.cd`→`.domain` - - (⊕) `ResetSignal` id, `cd=`→`domain=`, `.cd`→`.domain` - - (⊕) `_Statement` → `Statement` - - (⊕) `_Assign` → `Assign`, `l=`→`lhs=`, `r=`→`rhs=` - - (⊕) `_check_statement` **obs** → `Statement.cast` - - (⊕) `If` **obs** → `.hdl.dsl.Module.If` - - (⊕) `Case` **obs** → `.hdl.dsl.Module.Switch` - - (⊕) `_ArrayProxy` → `.hdl.ast.ArrayProxy`, `choices=`→`elems=`, `key=`→`index=` - - (⊕) `Array` id - - (⊕) `ClockDomain` → `.hdl.cd.ClockDomain` - - (⊙) `_ClockDomainList` **brk** - - (⊙) `SPECIAL_INPUT`/`SPECIAL_OUTPUT`/`SPECIAL_INOUT` **brk** - - (⊙) `_Fragment` **brk** → `.hdl.ir.Fragment` - - (⊙) `tools` **brk** - - (⊙) `insert_resets` **brk** → `.hdl.xfrm.ResetInserter` - - (⊙) `rename_clock_domain` **brk** → `.hdl.xfrm.DomainRenamer` - - (⊙) `tracer` **brk** - - (⊕) `get_var_name` → `.tracer.get_var_name` - - (⊙) `remove_underscore` **brk** - - (⊙) `get_obj_var_name` **brk** - - (⊙) `index_id` **brk** - - (⊙) `trace_back` **brk** - - (⊙) `verilog` - - (⊙) `DummyAttrTranslate` ? - - (⊕) `convert` **obs** → `.back.verilog.convert` - - (⊙) `visit` **brk** → `.hdl.xfrm` - - (⊙) `NodeVisitor` **brk** - - (⊙) `NodeTransformer` **brk** → `.hdl.xfrm.ValueTransformer`/`.hdl.xfrm.StatementTransformer` - - (−) `genlib` → `.lib` - - (−) `cdc` ? 
- - (⊙) `MultiRegImpl` **brk** - - (⊕) `MultiReg` → `.lib.cdc.FFSynchronizer` - - (−) `PulseSynchronizer` ? - - (−) `BusSynchronizer` ? - - (⊕) `GrayCounter` **obs** → `.lib.coding.GrayEncoder` - - (⊕) `GrayDecoder` **obs** → `.lib.coding.GrayDecoder` -
Note: `.lib.coding.GrayEncoder` and `.lib.coding.GrayDecoder` are purely combinatorial. - - (−) `ElasticBuffer` ? - - (−) `lcm` ? - - (−) `Gearbox` ? - - (⊕) `coding` id - - (⊕) `Encoder` id - - (⊕) `PriorityEncoder` id - - (⊕) `Decoder` id - - (⊕) `PriorityDecoder` id - - (−) `divider` ? - - (−) `Divider` ? - - (⊕) `fifo` → `.lib.fifo` - - (⊕) `_FIFOInterface` → `FIFOInterface` - - (⊕) `SyncFIFO` id, `.replace=`∼ - - (⊕) `SyncFIFOBuffered` id, `.fifo=`∼ - - (⊕) `AsyncFIFO` ? - - (⊕) `AsyncFIFOBuffered`, `.fifo=`∼ - - (⊕) `fsm` **obs** -
Note: FSMs are a part of core nMigen DSL; however, not all functionality is provided. The compatibility shim is a complete port of Migen FSM module. - - (⊙) `io` **brk** -
Note: all functionality in this module is a part of nMigen platform system. - - (−) `misc` ? - - (−) `split` ? - - (−) `displacer` ? - - (−) `chooser` ? - - (−) `timeline` ? - - (−) `WaitTimer` ? - - (−) `BitSlip` ? - - (⊕) `record` **obs** → `.hdl.rec.Record` -
Note: nMigen uses a `Layout` object to represent record layouts. - - (⊕) `DIR_NONE` id - - (⊕) `DIR_M_TO_S` → `DIR_FANOUT` - - (⊕) `DIR_S_TO_M` → `DIR_FANIN` - - (⊕) `Record` id - - (⊙) `set_layout_parameters` **brk** - - (⊙) `layout_len` **brk** - - (⊙) `layout_get` **brk** - - (⊙) `layout_partial` **brk** - - (⊕) `resetsync` **obs** - - (⊕) `AsyncResetSynchronizer` **obs** → `.lib.cdc.ResetSynchronizer` - - (−) `roundrobin` ? - - (−) `SP_WITHDRAW`/`SP_CE` ? - - (−) `RoundRobin` ? - - (−) `sort` ? - - (−) `BitonicSort` ? - - (⊕) `sim` **obs** → `.back.pysim` -
Note: only items directly under `nmigen.compat.sim`, not submodules, are provided. - - (⊙) `core` **brk** - - (⊙) `vcd` **brk** → `vcd` - - (⊙) `Simulator` **brk** - - (⊕) `run_simulation` **obs** → `.back.pysim.Simulator` - - (⊕) `passive` **obs** → `.hdl.ast.Passive` - - (⊙) `build` **brk** -
Note: the build system has been completely redesigned in nMigen. - - (⊙) `util` **brk** diff --git a/docs/_historical/PROPOSAL.md b/docs/_historical/PROPOSAL.md deleted file mode 100644 index 91b8555..0000000 --- a/docs/_historical/PROPOSAL.md +++ /dev/null @@ -1,241 +0,0 @@ -*The text below is the original nMigen implementation proposal. It is provided for illustrative and historical purposes only.* - -This repository contains a proposal for the design of nMigen in form of an implementation. This implementation deviates from the existing design of Migen by making several observations of its drawbacks: - - * Migen is strongly tailored towards Verilog, yet translation of Migen to Verilog is not straightforward, leaves much semantics implicit (e.g. signedness, width extension, combinatorial assignments, sub-signal assignments...); - * Hierarchical designs are useful for floorplanning and optimization, yet Migen does not support them at all; - * Migen's syntax is not easily composable, and something like an FSM requires extending Migen's syntax in non-orthogonal ways; - * Migen reimplements a lot of mature open-source tooling, such as conversion of RTL to Verilog (Yosys' Verilog backend), or simulation (Icarus Verilog, Verilator, etc.), and often lacks in features, speed, or corner case handling. - * Migen requires awkward specials for some FPGA features such as asynchronous resets. - -It also observes that Yosys' intermediate language, RTLIL, is an ideal target for Migen-style logic, as conversion of FHDL to RTLIL is essentially a 1:1 translation, with the exception of the related issues of naming and hierarchy. 
- -This proposal makes several major changes to Migen that hopefully solve all of these drawbacks: - - * nMigen changes FHDL's internal representation to closely match that of RTLIL; - * nMigen outputs RTLIL and relies on Yosys for conversion to Verilog, EDIF, etc; - * nMigen uses an exact mapping between FHDL signals and RTLIL names to off-load logic simulation to Icarus Verilog, Verilator, etc; - * nMigen uses an uniform, composable Python eHDL; - * nMigen outputs hierarchical RTLIL, automatically threading signals through the hierarchy; - * nMigen supports asynchronous reset directly; - * nMigen makes driving a signal from multiple clock domains a precise, hard error. - -This proposal keeps in mind but does not make the following major changes: - - * nMigen could be easily modified to flatten the hierarchy if a signal is driven simultaneously from multiple modules; - * nMigen could be easily modified to support `x` values (invalid / don't care) by relying on RTLIL's ability to directly represent them; - * nMigen could be easily modified to support negative edge triggered flip-flops by relying on RTLIL's ability to directly represent them; - * nMigen could be easily modified to track Python source locations of primitives and export them to RTLIL/Verilog through the `src` attribute, displaying the Python source locations in timing reports directly. - -This proposal also makes the following simplifications: - * Specials are eliminated. Primitives such as memory ports are represented directly, and primitives such as tristate buffers are lowered to a selectable implementation via ordinary dependency injection (`f.submodules += platform.get_tristate(triple, io)`). - -The internals of nMigen in this proposal are cleaned up, yet they are kept sufficiently close to Migen that \~all Migen code should be possible to run directly on nMigen using a syntactic compatibility layer. - -One might reasonably expect that a roundtrip through RTLIL would result in unreadable Verilog. 
-However, this is not the case, e.g. consider the examples: - -
-alu.v - -```verilog -module \$1 (co, sel, a, b, o); - wire [17:0] _04_; - input [15:0] a; - input [15:0] b; - output co; - reg \co$next ; - output [15:0] o; - reg [15:0] \o$next ; - input [1:0] sel; - assign _04_ = $signed(+ a) + $signed(- b); - always @* begin - \o$next = 16'h0000; - \co$next = 1'h0; - casez ({ 1'h1, sel == 2'h2, sel == 1'h1, sel == 0'b0 }) - 4'bzzz1: - \o$next = a | b; - 4'bzz1z: - \o$next = a & b; - 4'bz1zz: - \o$next = a ^ b; - 4'b1zzz: - { \co$next , \o$next } = _04_[16:0]; - endcase - end - assign o = \o$next ; - assign co = \co$next ; -endmodule -``` -
- -
-alu_hier.v - -```verilog -module add(b, o, a); - wire [16:0] _0_; - input [15:0] a; - input [15:0] b; - output [15:0] o; - reg [15:0] \o$next ; - assign _0_ = a + b; - always @* begin - \o$next = 16'h0000; - \o$next = _0_[15:0]; - end - assign o = \o$next ; -endmodule - -module sub(b, o, a); - wire [16:0] _0_; - input [15:0] a; - input [15:0] b; - output [15:0] o; - reg [15:0] \o$next ; - assign _0_ = a - b; - always @* begin - \o$next = 16'h0000; - \o$next = _0_[15:0]; - end - assign o = \o$next ; -endmodule - -module top(a, b, o, add_o, sub_o, op); - input [15:0] a; - wire [15:0] add_a; - reg [15:0] \add_a$next ; - wire [15:0] add_b; - reg [15:0] \add_b$next ; - input [15:0] add_o; - input [15:0] b; - output [15:0] o; - reg [15:0] \o$next ; - input op; - wire [15:0] sub_a; - reg [15:0] \sub_a$next ; - wire [15:0] sub_b; - reg [15:0] \sub_b$next ; - input [15:0] sub_o; - add add ( - .a(add_a), - .b(add_b), - .o(add_o) - ); - sub sub ( - .a(sub_a), - .b(sub_b), - .o(sub_o) - ); - always @* begin - \o$next = 16'h0000; - \add_a$next = 16'h0000; - \add_b$next = 16'h0000; - \sub_a$next = 16'h0000; - \sub_b$next = 16'h0000; - \add_a$next = a; - \sub_a$next = a; - \add_b$next = b; - \sub_b$next = b; - casez ({ 1'h1, op }) - 2'bz1: - \o$next = sub_o; - 2'b1z: - \o$next = add_o; - endcase - end - assign o = \o$next ; - assign add_a = \add_a$next ; - assign add_b = \add_b$next ; - assign sub_a = \sub_a$next ; - assign sub_b = \sub_b$next ; -endmodule -``` -
-
-clkdiv.v - -```verilog -module \$1 (sys_clk, o); - wire [16:0] _0_; - output o; - reg \o$next ; - input sys_clk; - wire sys_rst; - (* init = 16'hffff *) - reg [15:0] v = 16'hffff; - reg [15:0] \v$next ; - assign _0_ = v + 1'h1; - always @(posedge sys_clk) - v <= \v$next ; - always @* begin - \o$next = 1'h0; - \v$next = _0_[15:0]; - \o$next = v[15]; - casez (sys_rst) - 1'h1: - \v$next = 16'hffff; - endcase - end - assign o = \o$next ; -endmodule -``` -
- -
-arst.v - -```verilog -module \$1 (o, sys_clk, sys_rst); - wire [16:0] _0_; - output o; - reg \o$next ; - input sys_clk; - input sys_rst; - (* init = 16'h0000 *) - reg [15:0] v = 16'h0000; - reg [15:0] \v$next ; - assign _0_ = v + 1'h1; - always @(posedge sys_clk or posedge sys_rst) - if (sys_rst) - v <= 16'h0000; - else - v <= \v$next ; - always @* begin - \o$next = 1'h0; - \v$next = _0_[15:0]; - \o$next = v[15]; - end - assign o = \o$next ; -endmodule -``` -
- -
-pmux.v - -```verilog -module \$1 (c, o, s, a, b); - input [15:0] a; - input [15:0] b; - input [15:0] c; - output [15:0] o; - reg [15:0] \o$next ; - input [2:0] s; - always @* begin - \o$next = 16'h0000; - casez (s) - 3'bzz1: - \o$next = a; - 3'bz1z: - \o$next = b; - 3'b1zz: - \o$next = c; - 3'hz: - \o$next = 16'h0000; - endcase - end - assign o = \o$next ; -endmodule -``` -
diff --git a/docs/conf.py b/docs/conf.py index bf29557..dd22132 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,12 +1,12 @@ import os, sys sys.path.insert(0, os.path.abspath(".")) -import nmigen +import amaranth -project = "nMigen toolchain" -version = nmigen.__version__ +project = "Amaranth HDL toolchain" +version = amaranth.__version__ release = version.split("+")[0] -copyright = "2020, nMigen developers" +copyright = "2020—2021, Amaranth HDL developers" extensions = [ "sphinx.ext.intersphinx", diff --git a/docs/cover.rst b/docs/cover.rst index ced7431..0eeb3f4 100644 --- a/docs/cover.rst +++ b/docs/cover.rst @@ -1,8 +1,8 @@ -The nMigen documentation -######################## +Amaranth HDL documentation +########################## .. toctree:: :maxdepth: 1 index - Board definitions + Board definitions diff --git a/docs/install.rst b/docs/install.rst index 1314252..f913f44 100644 --- a/docs/install.rst +++ b/docs/install.rst @@ -4,19 +4,19 @@ Installation System requirements =================== -nMigen requires Python 3.6; it works on CPython_ 3.6 (or newer), and works faster on PyPy3.6_ 7.2 (or newer). +Amaranth HDL requires Python 3.6; it works on CPython_ 3.6 (or newer), and works faster on PyPy3.6_ 7.2 (or newer). -For most workflows, nMigen requires Yosys_ 0.9 (or newer). A compatible version of Yosys is distributed via PyPI_ for most popular platforms. +For most workflows, Amaranth requires Yosys_ 0.9 (or newer). A compatible version of Yosys is distributed via PyPI_ for most popular platforms. -Simulating nMigen code requires no additional software. However, a waveform viewer like GTKWave_ is invaluable for debugging. +Simulating Amaranth code requires no additional software. However, a waveform viewer like GTKWave_ is invaluable for debugging. -Synthesizing, placing and routing an nMigen design for an FPGA requires the FPGA family specific toolchain. 
+Synthesizing, placing and routing an Amaranth design for an FPGA requires the FPGA family specific toolchain. .. TODO: Link to FPGA family docs here .. _CPython: https://www.python.org/ .. _PyPy3.6: https://www.pypy.org/ -.. _Yosys: http://www.clifford.at/yosys/ +.. _Yosys: https://yosyshq.net/yosys/ .. _PyPI: https://pypi.org/ .. _GTKWave: http://gtkwave.sourceforge.net/ @@ -92,10 +92,10 @@ Installing prerequisites .. _install: -Installing nMigen -================= +Installing Amaranth +=================== -The latest release of nMigen should work well for most applications. A development snapshot---any commit from the ``master`` branch of nMigen---should be similarly reliable, but is likely to include experimental API changes that will be in flux until the next release. With that in mind, development snapshots can be used to try out new functionality or to avoid bugs fixed since the last release. +The latest release of Amaranth should work well for most applications. A development snapshot---any commit from the ``main`` branch of Amaranth---should be similarly reliable, but is likely to include experimental API changes that will be in flux until the next release. With that in mind, development snapshots can be used to try out new functionality or to avoid bugs fixed since the last release. .. _install-release: @@ -103,7 +103,7 @@ The latest release of nMigen should work well for most applications. A developme Latest release -------------- -.. |release:install| replace:: To install the latest release of nMigen, run: +.. |release:install| replace:: To install the latest release of Amaranth, run: .. platform-picker:: @@ -114,7 +114,7 @@ Latest release .. code-block:: doscon - > pip install --upgrade nmigen[builtin-yosys] + > pip install --upgrade amaranth[builtin-yosys] .. platform-choice:: macos :title: macOS @@ -123,22 +123,22 @@ Latest release .. 
code-block:: console - $ pip install --user --upgrade 'nmigen[builtin-yosys]' + $ pip install --user --upgrade 'amaranth[builtin-yosys]' .. platform-choice:: linux :title: Linux - If you **did not** install Yosys manually in the :ref:`previous step `, to install the latest release of nMigen, run: + If you **did not** install Yosys manually in the :ref:`previous step `, to install the latest release of Amaranth, run: .. code-block:: console - $ pip3 install --user --upgrade 'nmigen[builtin-yosys]' + $ pip3 install --user --upgrade 'amaranth[builtin-yosys]' If you **did** install Yosys manually in the previous step, run: .. code-block:: console - $ pip3 install --user --upgrade nmigen + $ pip3 install --user --upgrade amaranth .. platform-choice:: arch :altname: linux @@ -148,7 +148,7 @@ Latest release .. code-block:: console - $ sudo pacman -S python-nmigen + $ sudo pacman -S python-amaranth .. _install-snapshot: @@ -156,7 +156,7 @@ Latest release Development snapshot -------------------- -.. |snapshot:install| replace:: To install the latest development snapshot of nMigen, run: +.. |snapshot:install| replace:: To install the latest development snapshot of Amaranth, run: .. platform-picker:: @@ -167,7 +167,7 @@ Development snapshot .. code-block:: doscon - > pip install git+https://github.com/nmigen/nmigen.git#egg=nmigen[builtin-yosys] + > pip install git+https://github.com/amaranth-lang/amaranth.git#egg=amaranth[builtin-yosys] .. platform-choice:: macos :title: macOS @@ -176,22 +176,22 @@ Development snapshot .. code-block:: console - $ pip install --user 'git+https://github.com/nmigen/nmigen.git#egg=nmigen[builtin-yosys]' + $ pip install --user 'git+https://github.com/amaranth-lang/amaranth.git#egg=amaranth[builtin-yosys]' .. 
platform-choice:: linux :title: Linux - If you **did not** install Yosys manually in the :ref:`previous step `, to install the latest release of nMigen, run: + If you **did not** install Yosys manually in the :ref:`previous step `, to install the latest release of Amaranth, run: .. code-block:: console - $ pip3 install --user 'git+https://github.com/nmigen/nmigen.git#egg=nmigen[builtin-yosys]' + $ pip3 install --user 'git+https://github.com/amaranth-lang/amaranth.git#egg=amaranth[builtin-yosys]' If you **did** install Yosys manually in the previous step, run: .. code-block:: console - $ pip3 install --user 'git+https://github.com/nmigen/nmigen.git#egg=nmigen' + $ pip3 install --user 'git+https://github.com/amaranth-lang/amaranth.git#egg=amaranth' .. _install-develop: @@ -199,9 +199,9 @@ Development snapshot Editable development snapshot ----------------------------- -.. |develop:first-time| replace:: To install an editable development snapshot of nMigen for the first time, run: -.. |develop:update| replace:: Any changes made to the ``nmigen`` directory will immediately affect any code that uses nMigen. To update the snapshot, run: -.. |develop:reinstall| replace:: each time the editable development snapshot is updated in case package dependencies have been added or changed. Otherwise, code using nMigen may misbehave or crash with an ``ImportError``. +.. |develop:first-time| replace:: To install an editable development snapshot of Amaranth for the first time, run: +.. |develop:update| replace:: Any changes made to the ``amaranth`` directory will immediately affect any code that uses Amaranth. To update the snapshot, run: +.. |develop:reinstall| replace:: each time the editable development snapshot is updated in case package dependencies have been added or changed. Otherwise, code using Amaranth may misbehave or crash with an ``ImportError``. .. platform-picker:: @@ -212,16 +212,16 @@ Editable development snapshot .. 
code-block:: doscon - > git clone https://github.com/nmigen/nmigen - > cd nmigen + > git clone https://github.com/amaranth-lang/amaranth + > cd amaranth > pip install --editable .[builtin-yosys] |develop:update| .. code-block:: doscon - > cd nmigen - > git pull --ff-only origin master + > cd amaranth + > git pull --ff-only origin main > pip install --editable .[builtin-yosys] Run the ``pip install --editable .[builtin-yosys]`` command |develop:reinstall| @@ -233,16 +233,16 @@ Editable development snapshot .. code-block:: console - $ git clone https://github.com/nmigen/nmigen - $ cd nmigen + $ git clone https://github.com/amaranth-lang/amaranth + $ cd amaranth $ pip install --user --editable '.[builtin-yosys]' |develop:update| .. code-block:: console - $ cd nmigen - $ git pull --ff-only origin master + $ cd amaranth + $ git pull --ff-only origin main $ pip install --user --editable '.[builtin-yosys]' Run the ``pip install --editable .[builtin-yosys]`` command |develop:reinstall| @@ -256,16 +256,16 @@ Editable development snapshot .. code-block:: console - $ git clone https://github.com/nmigen/nmigen - $ cd nmigen + $ git clone https://github.com/amaranth-lang/amaranth + $ cd amaranth $ pip3 install --user --editable '.[builtin-yosys]' |develop:update| .. code-block:: console - $ cd nmigen - $ git pull --ff-only origin master + $ cd amaranth + $ git pull --ff-only origin main $ pip3 install --user --editable '.[builtin-yosys]' Run the ``pip3 install --editable .[builtin-yosys]`` command |develop:reinstall| @@ -276,4 +276,4 @@ Installing board definitions .. todo:: - Explain how to install ``_. + Explain how to install ``_. diff --git a/docs/intro.rst b/docs/intro.rst index f0e0d09..2fed044 100644 --- a/docs/intro.rst +++ b/docs/intro.rst @@ -1,61 +1,61 @@ .. 
TODO: this introduction is written for people well familiar with HDLs; we likely need - another one for people who will use nMigen as their first HDL + another one for people who will use Amaranth as their first HDL Introduction ############ -The core nMigen project provides an open-source toolchain for developing hardware based on synchronous digital logic using the Python programming language. It aims to be easy to learn and use, reduce or eliminate common coding mistakes, and simplify the design of complex hardware with reusable components. +The Amaranth project provides an open-source toolchain for developing hardware based on synchronous digital logic using the Python programming language. It aims to be easy to learn and use, reduce or eliminate common coding mistakes, and simplify the design of complex hardware with reusable components. -The nMigen toolchain consists of the :ref:`nMigen language `, the :ref:`standard library `, the :ref:`simulator `, and the :ref:`build system `, covering all steps of a typical FPGA development workflow. At the same time, it does not restrict the designer's choice of tools: existing industry-standard (System)Verilog or VHDL code can be integrated into an nMigen design flow, or, conversely, nMigen code can be integrated into an existing Verilog-based design flow. +The Amaranth toolchain consists of the :ref:`Amaranth language `, the :ref:`standard library `, the :ref:`simulator `, and the :ref:`build system `, covering all steps of a typical FPGA development workflow. At the same time, it does not restrict the designer's choice of tools: existing industry-standard (System)Verilog or VHDL code can be integrated into an Amaranth-based design flow, or, conversely, Amaranth code can be integrated into an existing Verilog-based design flow. .. TODO: add links to connect_rpc docs once they exist .. 
_intro-lang: -The nMigen language -=================== +The Amaranth language +===================== -The :doc:`nMigen hardware description language ` is a Python library for register transfer level modeling of synchronous logic. Ordinary Python code is used to construct a netlist of a digital circuit, which can be simulated, directly synthesized via Yosys_, or converted to human-readable Verilog code for use with industry-standard toolchains. +The :doc:`Amaranth hardware description language ` is a Python library for register transfer level modeling of synchronous logic. Ordinary Python code is used to construct a netlist of a digital circuit, which can be simulated, directly synthesized via Yosys_, or converted to human-readable Verilog code for use with industry-standard toolchains. -By relying on the flexibility, rich functionality and widespread adoption of the Python language, the nMigen language is focused on a single task: modeling digital logic well. It has first-class support for building blocks like clock domains and finite state machines, and uses simple rules for arithmetic operations that closely match the Python semantics. Python classes, functions, loops and conditionals can be used to build organized and flexible designs; Python libraries can be seamlessly used with nMigen during design or verification; and Python development tools can process nMigen code. +By relying on the flexibility, rich functionality and widespread adoption of the Python language, the Amaranth language is focused on a single task: modeling digital logic well. It has first-class support for building blocks like clock domains and finite state machines, and uses simple rules for arithmetic operations that closely match the Python semantics. 
Python classes, functions, loops and conditionals can be used to build organized and flexible designs; Python libraries can be seamlessly used with Amaranth during design or verification; and Python development tools can process Amaranth code. -A core design principle of the nMigen language is to be not only easy to use, but also hard to accidentally misuse. Some HDLs provide functionality that has unexpected and undesirable behavior in synthesis, often with expensive consequences, and require a significant effort in learning a "safe" coding style and adopting third-party linting tools. nMigen lacks non-synthesizable constructs and avoids error-prone inference in favor of explicit instantiation. It has many diagnostics (and regularly adds new ones) highlighting potential design issues. Most importantly, all usability issues are considered `reportable bugs`_. +A core design principle of the Amaranth language is to be not only easy to use, but also hard to accidentally misuse. Some HDLs provide functionality that has unexpected and undesirable behavior in synthesis, often with expensive consequences, and require a significant effort in learning a "safe" coding style and adopting third-party linting tools. Amaranth lacks non-synthesizable constructs and avoids error-prone inference in favor of explicit instantiation. It has many diagnostics (and regularly adds new ones) highlighting potential design issues. Most importantly, all usability issues are considered `reportable bugs`_. -.. _Yosys: http://www.clifford.at/yosys/ -.. _reportable bugs: https://github.com/nmigen/nmigen/issues +.. _Yosys: https://yosyshq.net/yosys/ +.. _reportable bugs: https://github.com/amaranth-lang/amaranth/issues .. _intro-stdlib: -The nMigen standard library -=========================== +The Amaranth standard library +============================= -The nMigen language comes with a standard library---a collection of essential digital design components and interfaces. 
It includes clock domain crossing primitives, synchronous and asynchronous FIFOs, a flexible I/O buffer interface, and more. By providing reliable building blocks out of the box, nMigen allows the designer to focus on their application and avoids subtle differences in behavior between different designs. +The Amaranth language comes with a standard library---a collection of essential digital design components and interfaces. It includes clock domain crossing primitives, synchronous and asynchronous FIFOs, a flexible I/O buffer interface, and more. By providing reliable building blocks out of the box, Amaranth allows the designer to focus on their application and avoids subtle differences in behavior between different designs. .. TODO: link to stdlib here -Clock domain crossing often requires special treatment, such as using vendor-defined attributes or instantiating device-specific primitives. The CDC primitives in the nMigen standard library can be overridden by the platform integration, and every platform integration included with nMigen follows the vendor recommendations for CDC. +Clock domain crossing often requires special treatment, such as using vendor-defined attributes or instantiating device-specific primitives. The CDC primitives in the Amaranth standard library can be overridden by the platform integration, and every platform integration included with Amaranth follows the vendor recommendations for CDC. -High-speed designs usually require the use of registered (and sometimes, geared) I/O buffers. The nMigen standard library provides a common interface to be used between I/O buffers and peripheral implementations. The nMigen build system, if used, can instantiate I/O buffers for every platform integration included with nMigen. +High-speed designs usually require the use of registered (and sometimes, geared) I/O buffers. The Amaranth standard library provides a common interface to be used between I/O buffers and peripheral implementations. 
The Amaranth build system, if used, can instantiate I/O buffers for every platform integration included with Amaranth. -While many designs will use at least some vendor-specific functionality, the components provided by the nMigen standard library reduce the amount of code that needs to be changed when migrating between FPGA families, and the common interfaces simplify peripherals, test benches and simulations. +While many designs will use at least some vendor-specific functionality, the components provided by the Amaranth standard library reduce the amount of code that needs to be changed when migrating between FPGA families, and the common interfaces simplify peripherals, test benches and simulations. -The nMigen standard library is optional: the nMigen language can be used without it. Conversely, it is possible to use the nMigen standard library components in Verilog or VHDL code, with some limitations. +The Amaranth standard library is optional: the Amaranth language can be used without it. Conversely, it is possible to use the Amaranth standard library components in Verilog or VHDL code, with some limitations. .. TODO: link to connect_rpc docs here *again* .. _intro-sim: -The nMigen simulator -==================== +The Amaranth simulator +====================== -The nMigen project includes an advanced simulator for nMigen code implemented in Python with no system dependencies; in this simulator, test benches are written as Python generator functions. Of course, it is always possible to convert an nMigen design to Verilog for use with well-known tool like `Icarus Verilog`_ or Verilator_. +The Amaranth project includes an advanced simulator for Amaranth code implemented in Python with no system dependencies; in this simulator, test benches are written as Python generator functions. Of course, it is always possible to convert an Amaranth design to Verilog for use with well-known tools like `Icarus Verilog`_ or Verilator_. 
-The nMigen simulator is event-driven and can simulate designs with multiple clocks or asynchronous resets. Although it is slower than `Icarus Verilog`_, it compiles the netlist to Python code ahead of time, achieving remarkably high performance for a pure Python implementation---especially when running on PyPy_. +The Amaranth simulator is event-driven and can simulate designs with multiple clocks or asynchronous resets. Although it is slower than `Icarus Verilog`_, it compiles the netlist to Python code ahead of time, achieving remarkably high performance for a pure Python implementation---especially when running on PyPy_. -Although nMigen does not support native code simulation or co-simulation at the moment, such support will be added in near future. +Although Amaranth does not support native code simulation or co-simulation at the moment, such support will be added in the near future. .. _Icarus Verilog: http://iverilog.icarus.com/ .. _Verilator: https://www.veripool.org/wiki/verilator @@ -65,10 +65,10 @@ Although nMigen does not support native code simulation or co-simulation at the .. _intro-build: -The nMigen build system -======================= +The Amaranth build system +========================= -To achieve an end-to-end FPGA development workflow, the nMigen project integrates with all major FPGA toolchains and provides definitions for many common development boards. +To achieve an end-to-end FPGA development workflow, the Amaranth project integrates with all major FPGA toolchains and provides definitions for many common development boards. .. TODO: link to vendor docs and board docs here @@ -76,18 +76,18 @@ To achieve an end-to-end FPGA development workflow, the nMigen project integrate FPGA toolchain integration -------------------------- -Each FPGA family requires the use of synthesis and place & route tools specific for that device family. 
The nMigen build system directly integrates with every major open-source and commercial FPGA toolchain, and can be easily extended to cover others. +Each FPGA family requires the use of synthesis and place & route tools specific for that device family. The Amaranth build system directly integrates with every major open-source and commercial FPGA toolchain, and can be easily extended to cover others. -Through this integration, nMigen can specialize the CDC primitives and I/O buffers for a particular device and toolchain; generate I/O and clock constraints from board definition files; synchronize the power-on reset in single-clock designs; include (System)Verilog and VHDL files in the design (if supported by the toolchain); and finally, generate a script running synthesis, placement, routing, and timing analysis. The generated code can be customized to insert additional options, commands, constraints, and so on. +Through this integration, Amaranth can specialize the CDC primitives and I/O buffers for a particular device and toolchain; generate I/O and clock constraints from board definition files; synchronize the power-on reset in single-clock designs; include (System)Verilog and VHDL files in the design (if supported by the toolchain); and finally, generate a script running synthesis, placement, routing, and timing analysis. The generated code can be customized to insert additional options, commands, constraints, and so on. -The nMigen build system produces self-contained, portable build trees that require only the toolchain to be present in the environment. This makes builds easier to reproduce, or to run on a remote machine. The generated build scripts are always provided for both \*nix and Windows. +The Amaranth build system produces self-contained, portable build trees that require only the toolchain to be present in the environment. This makes builds easier to reproduce, or to run on a remote machine. 
The generated build scripts are always provided for both \*nix and Windows. Development board definitions ----------------------------- -Getting started with a new FPGA development board often requires going through a laborous and error-prone process of deriving toolchain configuration and constraint files from the supplied documentation. The nMigen project includes a community-maintained repository of definitions for many open-source and commercial FPGA development boards. +Getting started with a new FPGA development board often requires going through a laborious and error-prone process of deriving toolchain configuration and constraint files from the supplied documentation. The Amaranth project includes a community-maintained repository of definitions for many open-source and commercial FPGA development boards. These board definitions contain everything that is necessary to start using the board: FPGA family and model, clocks and resets, descriptions of on-board peripherals (including pin direction and attributes such as I/O standard), connector pinouts, and for boards with a built-in debug probe, the steps required to program the board. It takes a single Python invocation to generate, build, and download a test design that shows whether the board, toolchain, and programmer are working correctly. -nMigen establishes a pin naming convention for many common peripherals (such as 7-segment displays, SPI flashes and SDRAM memories), enabling the reuse of unmodified interface code with many different boards. +Amaranth establishes a pin naming convention for many common peripherals (such as 7-segment displays, SPI flashes and SDRAM memories), enabling the reuse of unmodified interface code with many different boards. 
Further, the polarity of all control signals is unified to be active high, eliminating accidental polarity inversions and making simulation traces easier to follow; active low signals are inverted during I/O buffer instantiation. diff --git a/docs/lang.rst b/docs/lang.rst index d2ab200..65c0c82 100644 --- a/docs/lang.rst +++ b/docs/lang.rst @@ -5,7 +5,7 @@ Language guide This guide is a work in progress and is seriously incomplete! -This guide introduces the nMigen language in depth. It assumes familiarity with synchronous digital logic and the Python programming language, but does not require prior experience with any hardware description language. See the :doc:`tutorial ` for a step-by-step introduction to the language. +This guide introduces the Amaranth language in depth. It assumes familiarity with synchronous digital logic and the Python programming language, but does not require prior experience with any hardware description language. See the :doc:`tutorial ` for a step-by-step introduction to the language. .. TODO: link to a good synchronous logic tutorial and a Python tutorial? @@ -15,23 +15,23 @@ This guide introduces the nMigen language in depth. It assumes familiarity with The prelude =========== -Because nMigen is a regular Python library, it needs to be imported before use. The root ``nmigen`` module, called *the prelude*, is carefully curated to export a small amount of the most essential names, useful in nearly every design. In source files dedicated to nMigen code, it is a good practice to use a :ref:`glob import ` for readability: +Because Amaranth is a regular Python library, it needs to be imported before use. The root ``amaranth`` module, called *the prelude*, is carefully curated to export a small amount of the most essential names, useful in nearly every design. In source files dedicated to Amaranth code, it is a good practice to use a :ref:`glob import ` for readability: .. 
code-block:: - from nmigen import * + from amaranth import * -However, if a source file uses nMigen together with other libraries, or if glob imports are frowned upon, it is conventional to use a short alias instead: +However, if a source file uses Amaranth together with other libraries, or if glob imports are frowned upon, it is conventional to use a short alias instead: .. code-block:: - import nmigen as nm + import amaranth as am All of the examples below assume that a glob import is used. .. testsetup:: - from nmigen import * + from amaranth import * .. _lang-values: @@ -39,7 +39,7 @@ All of the examples below assume that a glob import is used. Values ====== -The basic building block of the nMigen language is a *value*, which is a term for a binary number that is computed or stored anywhere in the design. Each value has a *width*---the amount of bits used to represent the value---and a *signedness*---the interpretation of the value by arithmetic operations---collectively called its *shape*. Signed values always use `two's complement`_ representation. +The basic building block of the Amaranth language is a *value*, which is a term for a binary number that is computed or stored anywhere in the design. Each value has a *width*---the amount of bits used to represent the value---and a *signedness*---the interpretation of the value by arithmetic operations---collectively called its *shape*. Signed values always use `two's complement`_ representation. .. _two's complement: https://en.wikipedia.org/wiki/Two's_complement @@ -49,14 +49,14 @@ The basic building block of the nMigen language is a *value*, which is a term fo Constants ========= -The simplest nMigen value is a *constant*, representing a fixed number, and introduced using ``Const(...)`` or its short alias ``C(...)``: +The simplest Amaranth value is a *constant*, representing a fixed number, and introduced using ``Const(...)`` or its short alias ``C(...)``: .. 
doctest:: >>> ten = Const(10) >>> minus_two = C(-2) -The code above does not specify any shape for the constants. If the shape is omitted, nMigen uses unsigned shape for positive numbers and signed shape for negative numbers, with the width inferred from the smallest amount of bits necessary to represent the number. As a special case, in order to get the same inferred shape for ``True`` and ``False``, ``0`` is considered to be 1-bit unsigned. +The code above does not specify any shape for the constants. If the shape is omitted, Amaranth uses unsigned shape for positive numbers and signed shape for negative numbers, with the width inferred from the smallest amount of bits necessary to represent the number. As a special case, in order to get the same inferred shape for ``True`` and ``False``, ``0`` is considered to be 1-bit unsigned. .. doctest:: @@ -212,7 +212,7 @@ Specifying a shape with an enumeration is convenient for finite state machines, Value casting ============= -Like shapes, values may be *cast* from other objects, which are called *value-castable*. Casting allows objects that are not provided by nMigen, such as integers or enumeration members, to be used in nMigen expressions directly. +Like shapes, values may be *cast* from other objects, which are called *value-castable*. Casting allows objects that are not provided by Amaranth, such as integers or enumeration members, to be used in Amaranth expressions directly. .. TODO: link to UserValue @@ -348,7 +348,7 @@ Signals assigned in a :ref:`combinatorial ` domain are not affected b Operators ========= -To describe computations, nMigen values can be combined with each other or with :ref:`value-castable ` objects using a rich array of arithmetic, bitwise, logical, bit sequence, and other *operators* to form *expressions*, which are themselves values. 
+To describe computations, Amaranth values can be combined with each other or with :ref:`value-castable ` objects using a rich array of arithmetic, bitwise, logical, bit sequence, and other *operators* to form *expressions*, which are themselves values. .. _lang-abstractexpr: @@ -364,7 +364,7 @@ Code written in the Python language *performs* computations on concrete objects, >>> a + 1 6 -In contrast, code written in the nMigen language *describes* computations on abstract objects, like :ref:`signals `, with the goal of generating a hardware *circuit* that can be simulated, synthesized, and so on. nMigen expressions are ordinary Python objects that represent parts of this circuit: +In contrast, code written in the Amaranth language *describes* computations on abstract objects, like :ref:`signals `, with the goal of generating a hardware *circuit* that can be simulated, synthesized, and so on. Amaranth expressions are ordinary Python objects that represent parts of this circuit: .. doctest:: @@ -372,7 +372,7 @@ In contrast, code written in the nMigen language *describes* computations on abs >>> a + 1 (+ (sig a) (const 1'd1)) -Although the syntax is similar, it is important to remember that nMigen values exist on a higher level of abstraction than Python values. For example, expressions that include nMigen values cannot be used in Python control flow structures: +Although the syntax is similar, it is important to remember that Amaranth values exist on a higher level of abstraction than Python values. For example, expressions that include Amaranth values cannot be used in Python control flow structures: .. doctest:: @@ -380,9 +380,9 @@ Although the syntax is similar, it is important to remember that nMigen values e ... print("Zero!") Traceback (most recent call last): ... 
- TypeError: Attempted to convert nMigen value to Python boolean + TypeError: Attempted to convert Amaranth value to Python boolean -Because the value of ``a``, and therefore ``a == 0``, is not known at the time when the ``if`` statement is executed, there is no way to decide whether the body of the statement should be executed---in fact, if the design is synthesized, by the time ``a`` has any concrete value, the Python program has long finished! To solve this problem, nMigen provides its own :ref:`control structures ` that, also, manipulate circuits. +Because the value of ``a``, and therefore ``a == 0``, is not known at the time when the ``if`` statement is executed, there is no way to decide whether the body of the statement should be executed---in fact, if the design is synthesized, by the time ``a`` has any concrete value, the Python program has long finished! To solve this problem, Amaranth provides its own :ref:`control structures ` that, also, manipulate circuits. .. _lang-widthext: @@ -398,9 +398,9 @@ Many of the operations described below (for example, addition, equality, bitwise Arithmetic operators -------------------- -Most arithmetic operations on integers provided by Python can be used on nMigen values, too. +Most arithmetic operations on integers provided by Python can be used on Amaranth values, too. -Although Python integers have unlimited precision and nMigen values are represented with a :ref:`finite amount of bits `, arithmetics on nMigen values never overflows because the width of the arithmetic expression is always sufficient to represent all possible results. +Although Python integers have unlimited precision and Amaranth values are represented with a :ref:`finite amount of bits `, arithmetics on Amaranth values never overflows because the width of the arithmetic expression is always sufficient to represent all possible results. .. 
doctest:: @@ -408,7 +408,7 @@ Although Python integers have unlimited precision and nMigen values are represen >>> (a + 1).shape() # needs to represent 1 to 256 unsigned(9) -Similarly, although Python integers are always signed and nMigen values can be either :ref:`signed or unsigned `, if any of the operands of an nMigen arithmetic expression is signed, the expression itself is also signed, matching the behavior of Python. +Similarly, although Python integers are always signed and Amaranth values can be either :ref:`signed or unsigned `, if any of the operands of an Amaranth arithmetic expression is signed, the expression itself is also signed, matching the behavior of Python. .. doctest:: @@ -419,7 +419,7 @@ Similarly, although Python integers are always signed and nMigen values can be e While arithmetic computations never result in an overflow, :ref:`assigning ` their results to signals may truncate the most significant bits. -The following table lists the arithmetic operations provided by nMigen: +The following table lists the arithmetic operations provided by Amaranth: ============ ========================== ====== Operation Description Notes @@ -433,7 +433,7 @@ Operation Description Notes ``abs(a)`` absolute value ============ ========================== ====== -.. [#opA1] Divisor must be unsigned; this is an nMigen limitation that may be lifted in the future. +.. [#opA1] Divisor must be unsigned; this is an Amaranth limitation that may be lifted in the future. .. _lang-cmpops: @@ -441,11 +441,11 @@ Operation Description Notes Comparison operators -------------------- -All comparison operations on integers provided by Python can be used on nMigen values. However, due to a limitation of Python, chained comparisons (e.g. ``a < b < c``) cannot be used. +All comparison operations on integers provided by Python can be used on Amaranth values. However, due to a limitation of Python, chained comparisons (e.g. ``a < b < c``) cannot be used. 
Similar to arithmetic operations, if any operand of a comparison expression is signed, a signed comparison is performed. The result of a comparison is a 1-bit unsigned value. -The following table lists the comparison operations provided by nMigen: +The following table lists the comparison operations provided by Amaranth: ============ ========================== Operation Description @@ -464,13 +464,13 @@ Operation Description Bitwise, shift, and rotate operators ------------------------------------ -All bitwise and shift operations on integers provided by Python can be used on nMigen values as well. +All bitwise and shift operations on integers provided by Python can be used on Amaranth values as well. Similar to arithmetic operations, if any operand of a bitwise expression is signed, the expression itself is signed as well. A shift expression is signed if the shifted value is signed. A rotate expression is always unsigned. Rotate operations with variable rotate amounts cannot be efficiently synthesized for non-power-of-2 widths of the rotated value. Because of that, the rotate operations are only provided for constant rotate amounts, specified as Python :py:class:`int`\ s. -The following table lists the bitwise and shift operations provided by nMigen: +The following table lists the bitwise and shift operations provided by Amaranth: ===================== ========================================== ====== Operation Description Notes @@ -497,14 +497,14 @@ Operation Description Notes .. 
note:: - Because nMigen ensures that the width of a variable left shift expression is wide enough to represent any possible result, variable left shift by a wide amount produces exponentially wider intermediate values, stressing the synthesis tools: + Because Amaranth ensures that the width of a variable left shift expression is wide enough to represent any possible result, variable left shift by a wide amount produces exponentially wider intermediate values, stressing the synthesis tools: .. doctest:: >>> (1 << C(0, 32)).shape() unsigned(4294967296) - Although nMigen will detect and reject expressions wide enough to break other tools, it is a good practice to explicitly limit the width of a shift amount in a variable left shift. + Although Amaranth will detect and reject expressions wide enough to break other tools, it is a good practice to explicitly limit the width of a shift amount in a variable left shift. .. _lang-reduceops: @@ -516,7 +516,7 @@ Bitwise reduction operations on integers are not provided by Python, but are ver The result of a reduction is a 1-bit unsigned value. -The following table lists the reduction operations provided by nMigen: +The following table lists the reduction operations provided by Amaranth: ============ ============================================= ====== Operation Description Notes @@ -536,12 +536,12 @@ Operation Description Notes Logical operators ----------------- -Unlike the arithmetic or bitwise operators, it is not possible to change the behavior of the Python logical operators ``not``, ``and``, and ``or``. Due to that, logical expressions in nMigen are written using bitwise operations on boolean (1-bit unsigned) values, with explicit boolean conversions added where necessary. +Unlike the arithmetic or bitwise operators, it is not possible to change the behavior of the Python logical operators ``not``, ``and``, and ``or``. 
Due to that, logical expressions in Amaranth are written using bitwise operations on boolean (1-bit unsigned) values, with explicit boolean conversions added where necessary. -The following table lists the Python logical expressions and their nMigen equivalents: +The following table lists the Python logical expressions and their Amaranth equivalents: ================= ==================================== -Python expression nMigen expression (any operands) +Python expression Amaranth expression (any operands) ================= ==================================== ``not a`` ``~(a).bool()`` ``a and b`` ``(a).bool() & (b).bool()`` @@ -551,7 +551,7 @@ Python expression nMigen expression (any operands) When the operands are known to be boolean values, such as comparisons, reductions, or boolean signals, the ``.bool()`` conversion may be omitted for clarity: ================= ==================================== -Python expression nMigen expression (boolean operands) +Python expression Amaranth expression (boolean operands) ================= ==================================== ``not p`` ``~(p)`` ``p and q`` ``(p) & (q)`` @@ -562,9 +562,9 @@ Python expression nMigen expression (boolean operands) .. warning:: - Because of Python :ref:`operator precedence `, logical operators bind less tightly than comparison operators whereas bitwise operators bind more tightly than comparison operators. As a result, all logical expressions in nMigen **must** have parenthesized operands. + Because of Python :ref:`operator precedence `, logical operators bind less tightly than comparison operators whereas bitwise operators bind more tightly than comparison operators. As a result, all logical expressions in Amaranth **must** have parenthesized operands. - Omitting parentheses around operands in an nMigen a logical expression is likely to introduce a subtle bug: + Omitting parentheses around operands in an Amaranth logical expression is likely to introduce a subtle bug: .. 
doctest:: @@ -581,7 +581,7 @@ Python expression nMigen expression (boolean operands) .. warning:: - When applied to nMigen boolean values, the ``~`` operator computes negation, and when applied to Python boolean values, the ``not`` operator also computes negation. However, the ``~`` operator applied to Python boolean values produces an unexpected result: + When applied to Amaranth boolean values, the ``~`` operator computes negation, and when applied to Python boolean values, the ``not`` operator also computes negation. However, the ``~`` operator applied to Python boolean values produces an unexpected result: .. doctest:: @@ -590,7 +590,7 @@ Python expression nMigen expression (boolean operands) >>> ~True -2 - Because of this, Python booleans used in nMigen logical expressions **must** be negated with the ``not`` operator, not the ``~`` operator. Negating a Python boolean with the ``~`` operator in an nMigen logical expression is likely to introduce a subtle bug: + Because of this, Python booleans used in Amaranth logical expressions **must** be negated with the ``not`` operator, not the ``~`` operator. Negating a Python boolean with the ``~`` operator in an Amaranth logical expression is likely to introduce a subtle bug: .. doctest:: @@ -601,7 +601,7 @@ Python expression nMigen expression (boolean operands) >>> ~use_stb | stb # WRONG! MSB of 2-bit wide OR expression is always 1 (| (const 2'sd-2) (sig stb)) - nMigen automatically detects some cases of misuse of ``~`` and emits a detailed diagnostic message. + Amaranth automatically detects some cases of misuse of ``~`` and emits a detailed diagnostic message. .. TODO: this isn't quite reliable, #380 @@ -611,13 +611,13 @@ Python expression nMigen expression (boolean operands) Bit sequence operators ---------------------- -Apart from acting as numbers, nMigen values can also be treated as bit :ref:`sequences `, supporting slicing, concatenation, replication, and other sequence operations. 
Since some of the operators Python defines for sequences clash with the operators it defines for numbers, nMigen gives these operators a different name. Except for the names, nMigen values follow Python sequence semantics, with the least significant bit at index 0. +Apart from acting as numbers, Amaranth values can also be treated as bit :ref:`sequences `, supporting slicing, concatenation, replication, and other sequence operations. Since some of the operators Python defines for sequences clash with the operators it defines for numbers, Amaranth gives these operators a different name. Except for the names, Amaranth values follow Python sequence semantics, with the least significant bit at index 0. -Because every nMigen value has a single fixed width, bit slicing and replication operations require the subscripts and count to be constant, specified as Python :py:class:`int`\ s. It is often useful to slice a value with a constant width and variable offset, but this cannot be expressed with the Python slice notation. To solve this problem, nMigen provides additional *part select* operations with the necessary semantics. +Because every Amaranth value has a single fixed width, bit slicing and replication operations require the subscripts and count to be constant, specified as Python :py:class:`int`\ s. It is often useful to slice a value with a constant width and variable offset, but this cannot be expressed with the Python slice notation. To solve this problem, Amaranth provides additional *part select* operations with the necessary semantics. The result of any bit sequence operation is an unsigned value. 
-The following table lists the bit sequence operations provided by nMigen: +The following table lists the bit sequence operations provided by Amaranth: ======================= ================================================ ====== Operation Description Notes @@ -631,14 +631,14 @@ Operation Description Notes ``Repl(a, n)`` replication ======================= ================================================ ====== -.. [#opS1] Words "length" and "width" have the same meaning when talking about nMigen values. Conventionally, "width" is used. +.. [#opS1] Words "length" and "width" have the same meaning when talking about Amaranth values. Conventionally, "width" is used. .. [#opS2] All variations of the Python slice notation are supported, including "extended slicing". E.g. all of ``a[0]``, ``a[1:9]``, ``a[2:]``, ``a[:-2]``, ``a[::-1]``, ``a[0:8:2]`` select bits in the same way as other Python sequence types select their elements. .. [#opS3] In the concatenated value, ``a`` occupies the least significant bits, and ``b`` the most significant bits. -For the operators introduced by nMigen, the following table explains them in terms of Python code operating on tuples of bits rather than nMigen values: +For the operators introduced by Amaranth, the following table explains them in terms of Python code operating on tuples of bits rather than Amaranth values: ======================= ====================== -nMigen operation Equivalent Python code +Amaranth operation Equivalent Python code ======================= ====================== ``Cat(a, b)`` ``a + b`` ``Repl(a, n)`` ``a * n`` @@ -654,7 +654,7 @@ nMigen operation Equivalent Python code .. note:: - Could nMigen have used a different indexing or iteration order for values? Yes, but it would be necessary to either place the most significant bit at index 0, or deliberately break the Python sequence type interface. 
Both of these options would cause more issues than using different iteration orders for numeric and sequence operations. + Could Amaranth have used a different indexing or iteration order for values? Yes, but it would be necessary to either place the most significant bit at index 0, or deliberately break the Python sequence type interface. Both of these options would cause more issues than using different iteration orders for numeric and sequence operations. .. _lang-convops: @@ -664,7 +664,7 @@ Conversion operators The ``.as_signed()`` and ``.as_unsigned()`` conversion operators reinterpret the bits of a value with the requested signedness. This is useful when the same value is sometimes treated as signed and sometimes as unsigned, or when a signed value is constructed using slices or concatenations. For example, ``(pc + imm[:7].as_signed()).as_unsigned()`` sign-extends the 7 least significant bits of ``imm`` to the width of ``pc``, performs the addition, and produces an unsigned result. -.. TODO: more general shape conversion? https://github.com/nmigen/nmigen/issues/381 +.. TODO: more general shape conversion? https://github.com/amaranth-lang/amaranth/issues/381 .. _lang-muxop: @@ -680,12 +680,12 @@ The ``Mux(sel, val1, val0)`` choice expression (similar to the :ref:`conditional Modules ======= -A *module* is a unit of the nMigen design hierarchy: the smallest collection of logic that can be independently simulated, synthesized, or otherwise processed. Modules associate signals with :ref:`control domains `, provide :ref:`control structures `, manage clock domains, and aggregate submodules. +A *module* is a unit of the Amaranth design hierarchy: the smallest collection of logic that can be independently simulated, synthesized, or otherwise processed. Modules associate signals with :ref:`control domains `, provide :ref:`control structures `, manage clock domains, and aggregate submodules. .. TODO: link to clock domains .. 
TODO: link to submodules -Every nMigen design starts with a fresh module: +Every Amaranth design starts with a fresh module: .. doctest:: @@ -721,7 +721,7 @@ Assigning to signals >>> s.eq(1) (eq (sig s) (const 1'd1)) -Similar to :ref:`how nMigen operators work `, an nMigen assignment is an ordinary Python object used to describe a part of a circuit. An assignment does not have any effect on the signal it changes until it is added to a control domain in a module. Once added, it introduces logic into the circuit generated from that module. +Similar to :ref:`how Amaranth operators work `, an Amaranth assignment is an ordinary Python object used to describe a part of a circuit. An assignment does not have any effect on the signal it changes until it is added to a control domain in a module. Once added, it introduces logic into the circuit generated from that module. .. _lang-assignlhs: @@ -783,11 +783,11 @@ Every signal included in the target of an assignment becomes a part of the domai >>> m.d.sync += d.eq(0) Traceback (most recent call last): ... - nmigen.hdl.dsl.SyntaxError: Driver-driver conflict: trying to drive (sig d) from d.sync, but it is already driven from d.comb + amaranth.hdl.dsl.SyntaxError: Driver-driver conflict: trying to drive (sig d) from d.sync, but it is already driven from d.comb .. note:: - Clearly, nMigen code that drives a single bit of a signal from two different domains does not describe a meaningful circuit. However, driving two different bits of a signal from two different domains does not inherently cause such a conflict. Would nMigen accept the following code? + Clearly, Amaranth code that drives a single bit of a signal from two different domains does not describe a meaningful circuit. However, driving two different bits of a signal from two different domains does not inherently cause such a conflict. Would Amaranth accept the following code? .. 
testcode:: @@ -844,7 +844,7 @@ Multiple assignments to the same signal bits are more useful when combined with Control structures ------------------ -Although it is possible to write any decision tree as a combination of :ref:`assignments ` and :ref:`choice expressions `, nMigen provides *control structures* tailored for this task: If, Switch, and FSM. The syntax of all control structures is based on :ref:`context managers ` and uses ``with`` blocks, for example: +Although it is possible to write any decision tree as a combination of :ref:`assignments ` and :ref:`choice expressions `, Amaranth provides *control structures* tailored for this task: If, Switch, and FSM. The syntax of all control structures is based on :ref:`context managers ` and uses ``with`` blocks, for example: .. TODO: link to relevant subsections @@ -856,14 +856,14 @@ Although it is possible to write any decision tree as a combination of :ref:`ass with m.Else(): m.d.sync += timer.eq(timer - 1) -While some nMigen control structures are superficially similar to imperative control flow statements (such as Python's ``if``), their function---together with :ref:`expressions ` and :ref:`assignments `---is to describe circuits. The code above is equivalent to: +While some Amaranth control structures are superficially similar to imperative control flow statements (such as Python's ``if``), their function---together with :ref:`expressions ` and :ref:`assignments `---is to describe circuits. The code above is equivalent to: .. testcode:: timer = Signal(8) m.d.sync += timer.eq(Mux(timer == 0, 10, timer - 1)) -Because all branches of a decision tree affect the generated circuit, all of the Python code inside nMigen control structures is always evaluated in the order in which it appears in the program. 
This can be observed through Python code with side effects, such as ``print()``: +Because all branches of a decision tree affect the generated circuit, all of the Python code inside Amaranth control structures is always evaluated in the order in which it appears in the program. This can be observed through Python code with side effects, such as ``print()``: .. testcode:: @@ -886,7 +886,7 @@ Because all branches of a decision tree affect the generated circuit, all of the Active and inactive assignments ------------------------------- -An assignment added inside an nMigen control structure, i.e. ``with m.<...>:`` block, is *active* if the condition of the control structure is satisfied, and *inactive* otherwise. For any given set of conditions, the final value of every signal assigned in a module is the same as if the inactive assignments were removed and the active assignments were performed unconditionally, taking into account the :ref:`assignment order `. +An assignment added inside an Amaranth control structure, i.e. ``with m.<...>:`` block, is *active* if the condition of the control structure is satisfied, and *inactive* otherwise. For any given set of conditions, the final value of every signal assigned in a module is the same as if the inactive assignments were removed and the active assignments were performed unconditionally, taking into account the :ref:`assignment order `. For example, there are two possible cases in the circuit generated from the following code: @@ -945,15 +945,15 @@ Consider the following code: Whenever the signals ``en`` or ``b`` change, the signal ``a`` changes as well. If ``en`` is false, the final value of ``a`` is its initial value, ``1``. If ``en`` is true, the final value of ``a`` is equal to ``b + 1``. -A combinatorial signal that is computed directly or indirectly based on its own value is a part of a *combinatorial feedback loop*, sometimes shortened to just *feedback loop*. Combinatorial feedback loops can be stable (i.e. 
implement a constant driver or a transparent latch), or unstable (i.e. implement a ring oscillator). nMigen prohibits using assignments to describe any kind of a combinatorial feedback loop, including transparent latches. +A combinatorial signal that is computed directly or indirectly based on its own value is a part of a *combinatorial feedback loop*, sometimes shortened to just *feedback loop*. Combinatorial feedback loops can be stable (i.e. implement a constant driver or a transparent latch), or unstable (i.e. implement a ring oscillator). Amaranth prohibits using assignments to describe any kind of a combinatorial feedback loop, including transparent latches. .. warning:: - The current version of nMigen does not detect combinatorial feedback loops, but processes the design under the assumption that there aren't any. If the design does in fact contain a combinatorial feedback loop, it will likely be **silently miscompiled**, though some cases will be detected during synthesis or place & route. + The current version of Amaranth does not detect combinatorial feedback loops, but processes the design under the assumption that there aren't any. If the design does in fact contain a combinatorial feedback loop, it will likely be **silently miscompiled**, though some cases will be detected during synthesis or place & route. This hazard will be eliminated in the future. -.. TODO: fix this, either as a part of https://github.com/nmigen/nmigen/issues/6 or on its own +.. TODO: fix this, either as a part of https://github.com/amaranth-lang/amaranth/issues/6 or on its own .. note:: diff --git a/docs/start.rst b/docs/start.rst index 972a287..fb5fb0e 100644 --- a/docs/start.rst +++ b/docs/start.rst @@ -1,7 +1,7 @@ Getting started ############### -This section demonstrates the basic nMigen workflow to provide a cursory overview of the language and the toolchain. 
See the :doc:`tutorial ` for a step-by-step introduction to the language, and the :doc:`language guide ` for a detailed explanation of every language construct. +This section demonstrates the basic Amaranth workflow to provide a cursory overview of the language and the toolchain. See the :doc:`tutorial ` for a step-by-step introduction to the language, and the :doc:`language guide ` for a detailed explanation of every language construct. .. TODO: add link to build system doc .. TODO: add link to more complex examples? @@ -20,14 +20,14 @@ As a first example, consider a counter with a fixed limit, enable, and overflow. Implementing a counter ---------------------- -A 16-bit up counter with enable input, overflow output, and a limit fixed at design time can be implemented in nMigen as follows: +A 16-bit up counter with enable input, overflow output, and a limit fixed at design time can be implemented in Amaranth as follows: .. literalinclude:: _code/up_counter.py :linenos: :lineno-match: :end-before: # --- TEST --- -The reusable building block of nMigen designs is an ``Elaboratable``: a Python class that includes HDL signals (``en`` and ``ovf``, in this case) as a part of its interface, and provides the ``elaborate`` method that defines its behavior. +The reusable building block of Amaranth designs is an ``Elaboratable``: a Python class that includes HDL signals (``en`` and ``ovf``, in this case) as a part of its interface, and provides the ``elaborate`` method that defines its behavior. .. TODO: link to Elaboratable reference @@ -60,7 +60,7 @@ When run, the test bench finishes successfully, since all of the assertions hold Converting a counter -------------------- -Although some nMigen workflows do not include Verilog at all, it is still the de facto standard for HDL interoperability. 
Any nMigen design can be converted to synthesizable Verilog using the corresponding backend: +Although some Amaranth workflows do not include Verilog at all, it is still the de facto standard for HDL interoperability. Any Amaranth design can be converted to synthesizable Verilog using the corresponding backend: .. literalinclude:: _code/up_counter.py :linenos: @@ -75,7 +75,7 @@ The signals that will be connected to the ports of the top-level Verilog module :language: verilog :linenos: -To aid debugging, the generated Verilog code has the same general structure as the nMigen source code (although more verbose), and contains extensive source location information. +To aid debugging, the generated Verilog code has the same general structure as the Amaranth source code (although more verbose), and contains extensive source location information. .. note:: @@ -85,14 +85,14 @@ To aid debugging, the generated Verilog code has the same general structure as t A blinking LED ============== -Although nMigen works well as a standalone HDL, it also includes a build system that integrates with FPGA toolchains, and many board definition files for common developer boards that include pinouts and programming adapter invocations. The following code will blink a LED with a frequency of 1 Hz on any board that has a LED and an oscillator: +Although Amaranth works well as a standalone HDL, it also includes a build system that integrates with FPGA toolchains, and many board definition files for common developer boards that include pinouts and programming adapter invocations. The following code will blink a LED with a frequency of 1 Hz on any board that has a LED and an oscillator: .. literalinclude:: _code/led_blinker.py :linenos: :lineno-match: :end-before: # --- BUILD --- -The ``LEDBlinker`` module will use the first LED available on the board, and derive the clock divisor from the oscillator frequency specified in the clock constraint. 
It can be used, for example, with the `Lattice iCEStick evaluation board `_, one of the many boards already supported by nMigen: +The ``LEDBlinker`` module will use the first LED available on the board, and derive the clock divisor from the oscillator frequency specified in the clock constraint. It can be used, for example, with the `Lattice iCEStick evaluation board `_, one of the many boards already supported by Amaranth: .. TODO: link to list of supported boards @@ -105,7 +105,7 @@ The ``LEDBlinker`` module will use the first LED available on the board, and der :lineno-match: :start-after: # --- BUILD --- -With only a single line of code, the design is synthesized, placed, routed, and programmed to the on-board Flash memory. Although not all applications will use the nMigen build system, the designs that choose it can benefit from the "turnkey" built-in workflows; if necessary, the built-in workflows can be customized to include user-specified options, commands, and files. +With only a single line of code, the design is synthesized, placed, routed, and programmed to the on-board Flash memory. Although not all applications will use the Amaranth build system, the designs that choose it can benefit from the "turnkey" built-in workflows; if necessary, the built-in workflows can be customized to include user-specified options, commands, and files. .. TODO: link to build system reference @@ -115,6 +115,6 @@ With only a single line of code, the design is synthesized, placed, routed, and .. code-block:: shell - $ python3 -m nmigen_boards.icestick + $ python3 -m amaranth_boards.icestick This command will build and program a test bitstream similar to the example above. diff --git a/docs/tutorial.rst b/docs/tutorial.rst index 407b4cf..a937f61 100644 --- a/docs/tutorial.rst +++ b/docs/tutorial.rst @@ -3,7 +3,7 @@ Tutorial .. todo:: - The official tutorial is still being written. 
Until it's ready, consider following one of the tutorials written by the nMigen community: + The official tutorial is still being written. Until it's ready, consider following one of the tutorials written by the Amaranth community: * `Learning FPGA Design with nMigen `_ by Vivonomicon; * `nMigen Step by Step Tutorial `_ by LambdaConcept; diff --git a/examples/basic/alu.py b/examples/basic/alu.py index 5c0e8e6..af5b26a 100644 --- a/examples/basic/alu.py +++ b/examples/basic/alu.py @@ -1,5 +1,5 @@ -from nmigen import * -from nmigen.cli import main +from amaranth import * +from amaranth.cli import main class ALU(Elaboratable): diff --git a/examples/basic/alu_hier.py b/examples/basic/alu_hier.py index a3273af..4dbd6de 100644 --- a/examples/basic/alu_hier.py +++ b/examples/basic/alu_hier.py @@ -1,5 +1,5 @@ -from nmigen import * -from nmigen.cli import main +from amaranth import * +from amaranth.cli import main class Adder(Elaboratable): diff --git a/examples/basic/arst.py b/examples/basic/arst.py index 58b4015..1089b94 100644 --- a/examples/basic/arst.py +++ b/examples/basic/arst.py @@ -1,5 +1,5 @@ -from nmigen import * -from nmigen.cli import main +from amaranth import * +from amaranth.cli import main class ClockDivisor(Elaboratable): diff --git a/examples/basic/cdc.py b/examples/basic/cdc.py index 658d075..786a98f 100644 --- a/examples/basic/cdc.py +++ b/examples/basic/cdc.py @@ -1,6 +1,6 @@ -from nmigen import * -from nmigen.lib.cdc import FFSynchronizer -from nmigen.cli import main +from amaranth import * +from amaranth.lib.cdc import FFSynchronizer +from amaranth.cli import main i, o = Signal(name="i"), Signal(name="o") diff --git a/examples/basic/ctr.py b/examples/basic/ctr.py index 044cc53..4e44773 100644 --- a/examples/basic/ctr.py +++ b/examples/basic/ctr.py @@ -1,5 +1,5 @@ -from nmigen import * -from nmigen.cli import main +from amaranth import * +from amaranth.cli import main class Counter(Elaboratable): diff --git a/examples/basic/ctr_en.py 
b/examples/basic/ctr_en.py index ba63737..833b8bd 100644 --- a/examples/basic/ctr_en.py +++ b/examples/basic/ctr_en.py @@ -1,6 +1,6 @@ -from nmigen import * -from nmigen.sim import * -from nmigen.back import verilog +from amaranth import * +from amaranth.sim import * +from amaranth.back import verilog class Counter(Elaboratable): diff --git a/examples/basic/fsm.py b/examples/basic/fsm.py index 712718b..8aea5af 100644 --- a/examples/basic/fsm.py +++ b/examples/basic/fsm.py @@ -1,5 +1,5 @@ -from nmigen import * -from nmigen.cli import main +from amaranth import * +from amaranth.cli import main class UARTReceiver(Elaboratable): diff --git a/examples/basic/gpio.py b/examples/basic/gpio.py index 6905db4..408404a 100644 --- a/examples/basic/gpio.py +++ b/examples/basic/gpio.py @@ -1,5 +1,5 @@ -from nmigen import * -from nmigen.cli import main +from amaranth import * +from amaranth.cli import main class GPIO(Elaboratable): diff --git a/examples/basic/inst.py b/examples/basic/inst.py index 2fc519b..19d5d9d 100644 --- a/examples/basic/inst.py +++ b/examples/basic/inst.py @@ -1,5 +1,5 @@ -from nmigen import * -from nmigen.cli import main +from amaranth import * +from amaranth.cli import main class System(Elaboratable): diff --git a/examples/basic/mem.py b/examples/basic/mem.py index 82105fc..14bb661 100644 --- a/examples/basic/mem.py +++ b/examples/basic/mem.py @@ -1,5 +1,5 @@ -from nmigen import * -from nmigen.cli import main +from amaranth import * +from amaranth.cli import main class RegisterFile(Elaboratable): diff --git a/examples/basic/pmux.py b/examples/basic/pmux.py index 1e938b5..2fc7b0c 100644 --- a/examples/basic/pmux.py +++ b/examples/basic/pmux.py @@ -1,5 +1,5 @@ -from nmigen import * -from nmigen.cli import main +from amaranth import * +from amaranth.cli import main class ParMux(Elaboratable): diff --git a/examples/basic/por.py b/examples/basic/por.py index 8bdc020..1ed2773 100644 --- a/examples/basic/por.py +++ b/examples/basic/por.py @@ -1,5 +1,5 @@ -from 
nmigen import * -from nmigen.cli import main +from amaranth import * +from amaranth.cli import main m = Module() diff --git a/examples/basic/sel.py b/examples/basic/sel.py index da45423..7f480ac 100644 --- a/examples/basic/sel.py +++ b/examples/basic/sel.py @@ -1,5 +1,5 @@ -from nmigen import * -from nmigen.cli import main +from amaranth import * +from amaranth.cli import main class FlatGPIO(Elaboratable): diff --git a/examples/basic/uart.py b/examples/basic/uart.py index 2a3b106..3c99021 100644 --- a/examples/basic/uart.py +++ b/examples/basic/uart.py @@ -1,4 +1,4 @@ -from nmigen import * +from amaranth import * class UART(Elaboratable): @@ -103,7 +103,7 @@ if __name__ == "__main__": args = parser.parse_args() if args.action == "simulate": - from nmigen.sim import Simulator, Passive + from amaranth.sim import Simulator, Passive sim = Simulator(uart) sim.add_clock(1e-6) @@ -141,6 +141,6 @@ if __name__ == "__main__": sim.run() if args.action == "generate": - from nmigen.back import verilog + from amaranth.back import verilog print(verilog.convert(uart, ports=ports)) diff --git a/examples/board/01_blinky.py b/examples/board/01_blinky.py index 0a21a00..6e43ceb 100644 --- a/examples/board/01_blinky.py +++ b/examples/board/01_blinky.py @@ -1,8 +1,8 @@ -# If the design does not create a "sync" clock domain, it is created by the nMigen build system +# If the design does not create a "sync" clock domain, it is created by the Amaranth build system # using the platform default clock (and default reset, if any). -from nmigen import * -from nmigen_boards.ice40_hx1k_blink_evn import * +from amaranth import * +from amaranth_boards.ice40_hx1k_blink_evn import * class Blinky(Elaboratable): diff --git a/examples/board/02_domain.py b/examples/board/02_domain.py index d359334..62ae565 100644 --- a/examples/board/02_domain.py +++ b/examples/board/02_domain.py @@ -2,8 +2,8 @@ # explicitly, which overrides the default behavior. 
Any other clock domains could also be # independently created in addition to the main "sync" domain. -from nmigen import * -from nmigen_boards.ice40_hx1k_blink_evn import * +from amaranth import * +from amaranth_boards.ice40_hx1k_blink_evn import * class BlinkyWithDomain(Elaboratable): diff --git a/nmigen/__init__.py b/nmigen/__init__.py index 7f99c4a..88849c7 100644 --- a/nmigen/__init__.py +++ b/nmigen/__init__.py @@ -1,26 +1,7 @@ -try: - try: - from importlib import metadata as importlib_metadata # py3.8+ stdlib - except ImportError: - import importlib_metadata # py3.7- shim - __version__ = importlib_metadata.version(__package__) -except ImportError: - # No importlib_metadata. This shouldn't normally happen, but some people prefer not installing - # packages via pip at all, instead using PYTHONPATH directly or copying the package files into - # `lib/pythonX.Y/site-packages`. Although not a recommended way, we still try to support it. - __version__ = "unknown" # :nocov: +from amaranth import * +from amaranth import __all__ -from .hdl import * - - -__all__ = [ - "Shape", "unsigned", "signed", - "Value", "Const", "C", "Mux", "Cat", "Repl", "Array", "Signal", "ClockSignal", "ResetSignal", - "Module", - "ClockDomain", - "Elaboratable", "Fragment", "Instance", - "Memory", - "Record", - "DomainRenamer", "ResetInserter", "EnableInserter", -] +import warnings +warnings.warn("instead of nmigen, use amaranth", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/asserts.py b/nmigen/asserts.py index b0e97b9..b9926b7 100644 --- a/nmigen/asserts.py +++ b/nmigen/asserts.py @@ -1,2 +1,6 @@ -from .hdl.ast import AnyConst, AnySeq, Assert, Assume, Cover -from .hdl.ast import Past, Stable, Rose, Fell, Initial +from amaranth.asserts import * + + +import warnings +warnings.warn("instead of nmigen.asserts, use amaranth.asserts", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/back/__init__.py b/nmigen/back/__init__.py index e69de29..417612a 100644 --- 
a/nmigen/back/__init__.py +++ b/nmigen/back/__init__.py @@ -0,0 +1,6 @@ +from amaranth.back import * + + +import warnings +warnings.warn("instead of nmigen.back, use amaranth.back", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/back/cxxrtl.py b/nmigen/back/cxxrtl.py index 809a136..5ac9025 100644 --- a/nmigen/back/cxxrtl.py +++ b/nmigen/back/cxxrtl.py @@ -1,41 +1,7 @@ -from .._toolchain.yosys import * -from . import rtlil +from amaranth.back.cxxrtl import * +from amaranth.back.cxxrtl import __all__ -__all__ = ["YosysError", "convert", "convert_fragment"] - - -def _convert_rtlil_text(rtlil_text, black_boxes, *, src_loc_at=0): - if black_boxes is not None: - if not isinstance(black_boxes, dict): - raise TypeError("CXXRTL black boxes must be a dictionary, not {!r}" - .format(black_boxes)) - for box_name, box_source in black_boxes.items(): - if not isinstance(box_name, str): - raise TypeError("CXXRTL black box name must be a string, not {!r}" - .format(box_name)) - if not isinstance(box_source, str): - raise TypeError("CXXRTL black box source code must be a string, not {!r}" - .format(box_source)) - - yosys = find_yosys(lambda ver: ver >= (0, 9, 3468)) - - script = [] - if black_boxes is not None: - for box_name, box_source in black_boxes.items(): - script.append("read_ilang < 2 ** 16: - raise ImplementationLimit("Wire created at {} is {} bits wide, which is unlikely to " - "synthesize correctly" - .format(src or "unknown location", width)) - - self._attributes(attrs, src=src, indent=1) - name = self._make_name(name, local=False) - if port_id is None: - self._append(" wire width {} {}\n", width, name) - else: - assert port_kind in ("input", "output", "inout") - self._append(" wire width {} {} {} {}\n", width, port_kind, port_id, name) - return name - - def connect(self, lhs, rhs): - self._append(" connect {} {}\n", lhs, rhs) - - def memory(self, width, size, name=None, attrs={}, src=""): - self._attributes(attrs, src=src, indent=1) - name = 
self._make_name(name, local=False) - self._append(" memory width {} size {} {}\n", width, size, name) - return name - - def cell(self, kind, name=None, params={}, ports={}, attrs={}, src=""): - self._attributes(attrs, src=src, indent=1) - name = self._make_name(name, local=False) - self._append(" cell {} {}\n", kind, name) - for param, value in params.items(): - if isinstance(value, float): - self._append(" parameter real \\{} \"{!r}\"\n", - param, value) - elif _signed(value): - self._append(" parameter signed \\{} {}\n", - param, _const(value)) - else: - self._append(" parameter \\{} {}\n", - param, _const(value)) - for port, wire in ports.items(): - self._append(" connect {} {}\n", port, wire) - self._append(" end\n") - return name - - def process(self, name=None, attrs={}, src=""): - name = self._make_name(name, local=True) - return _ProcessBuilder(self, name, attrs, src) - - -class _ProcessBuilder(_BufferedBuilder, _AttrBuilder): - def __init__(self, rtlil, name, attrs, src): - super().__init__() - self.rtlil = rtlil - self.name = name - self.attrs = {} - self.src = src - - def __enter__(self): - self._attributes(self.attrs, src=self.src, indent=1) - self._append(" process {}\n", self.name) - return self - - def __exit__(self, *args): - self._append(" end\n") - self.rtlil._buffer.write(str(self)) - - def case(self): - return _CaseBuilder(self, indent=2) - - def sync(self, kind, cond=None): - return _SyncBuilder(self, kind, cond) - - -class _CaseBuilder(_ProxiedBuilder): - def __init__(self, rtlil, indent): - self.rtlil = rtlil - self.indent = indent - - def __enter__(self): - return self - - def __exit__(self, *args): - pass - - def assign(self, lhs, rhs): - self._append("{}assign {} {}\n", " " * self.indent, lhs, rhs) - - def switch(self, cond, attrs={}, src=""): - return _SwitchBuilder(self.rtlil, cond, attrs, src, self.indent) - - -class _SwitchBuilder(_ProxiedBuilder, _AttrBuilder): - def __init__(self, rtlil, cond, attrs, src, indent): - self.rtlil = 
rtlil - self.cond = cond - self.attrs = attrs - self.src = src - self.indent = indent - - def __enter__(self): - self._attributes(self.attrs, src=self.src, indent=self.indent) - self._append("{}switch {}\n", " " * self.indent, self.cond) - return self - - def __exit__(self, *args): - self._append("{}end\n", " " * self.indent) - - def case(self, *values, attrs={}, src=""): - self._attributes(attrs, src=src, indent=self.indent + 1) - if values == (): - self._append("{}case\n", " " * (self.indent + 1)) - else: - self._append("{}case {}\n", " " * (self.indent + 1), - ", ".join("{}'{}".format(len(value), value) for value in values)) - return _CaseBuilder(self.rtlil, self.indent + 2) - - -class _SyncBuilder(_ProxiedBuilder): - def __init__(self, rtlil, kind, cond): - self.rtlil = rtlil - self.kind = kind - self.cond = cond - - def __enter__(self): - if self.cond is None: - self._append(" sync {}\n", self.kind) - else: - self._append(" sync {} {}\n", self.kind, self.cond) - return self - - def __exit__(self, *args): - pass - - def update(self, lhs, rhs): - self._append(" update {} {}\n", lhs, rhs) - - -def _src(src_loc): - if src_loc is None: - return None - file, line = src_loc - return "{}:{}".format(file, line) - - -class _LegalizeValue(Exception): - def __init__(self, value, branches, src_loc): - self.value = value - self.branches = list(branches) - self.src_loc = src_loc - - -class _ValueCompilerState: - def __init__(self, rtlil): - self.rtlil = rtlil - self.wires = ast.SignalDict() - self.driven = ast.SignalDict() - self.ports = ast.SignalDict() - self.anys = ast.ValueDict() - - self.expansions = ast.ValueDict() - - def add_driven(self, signal, sync): - self.driven[signal] = sync - - def add_port(self, signal, kind): - assert kind in ("i", "o", "io") - if kind == "i": - kind = "input" - elif kind == "o": - kind = "output" - elif kind == "io": - kind = "inout" - self.ports[signal] = (len(self.ports), kind) - - def resolve(self, signal, prefix=None): - if len(signal) 
== 0: - return "{ }", "{ }" - - if signal in self.wires: - return self.wires[signal] - - if signal in self.ports: - port_id, port_kind = self.ports[signal] - else: - port_id = port_kind = None - if prefix is not None: - wire_name = "{}_{}".format(prefix, signal.name) - else: - wire_name = signal.name - - attrs = dict(signal.attrs) - if signal._enum_class is not None: - attrs["enum_base_type"] = signal._enum_class.__name__ - for value in signal._enum_class: - attrs["enum_value_{:0{}b}".format(value.value, signal.width)] = value.name - - wire_curr = self.rtlil.wire(width=signal.width, name=wire_name, - port_id=port_id, port_kind=port_kind, - attrs=attrs, src=_src(signal.src_loc)) - if signal in self.driven and self.driven[signal]: - wire_next = self.rtlil.wire(width=signal.width, name=wire_curr + "$next", - src=_src(signal.src_loc)) - else: - wire_next = None - self.wires[signal] = (wire_curr, wire_next) - - return wire_curr, wire_next - - def resolve_curr(self, signal, prefix=None): - wire_curr, wire_next = self.resolve(signal, prefix) - return wire_curr - - def expand(self, value): - if not self.expansions: - return value - return self.expansions.get(value, value) - - @contextmanager - def expand_to(self, value, expansion): - try: - assert value not in self.expansions - self.expansions[value] = expansion - yield - finally: - del self.expansions[value] - - -class _ValueCompiler(xfrm.ValueVisitor): - def __init__(self, state): - self.s = state - - def on_unknown(self, value): - if value is None: - return None - else: - super().on_unknown(value) - - def on_ClockSignal(self, value): - raise NotImplementedError # :nocov: - - def on_ResetSignal(self, value): - raise NotImplementedError # :nocov: - - def on_Sample(self, value): - raise NotImplementedError # :nocov: - - def on_Initial(self, value): - raise NotImplementedError # :nocov: - - def on_Cat(self, value): - return "{{ {} }}".format(" ".join(reversed([self(o) for o in value.parts]))) - - def 
_prepare_value_for_Slice(self, value): - raise NotImplementedError # :nocov: - - def on_Slice(self, value): - if value.start == 0 and value.stop == len(value.value): - return self(value.value) - - if isinstance(value.value, ast.UserValue): - sigspec = self._prepare_value_for_Slice(value.value._lazy_lower()) - else: - sigspec = self._prepare_value_for_Slice(value.value) - - if value.start == value.stop: - return "{}" - elif value.start + 1 == value.stop: - return "{} [{}]".format(sigspec, value.start) - else: - return "{} [{}:{}]".format(sigspec, value.stop - 1, value.start) - - def on_ArrayProxy(self, value): - index = self.s.expand(value.index) - if isinstance(index, ast.Const): - if index.value < len(value.elems): - elem = value.elems[index.value] - else: - elem = value.elems[-1] - return self.match_shape(elem, *value.shape()) - else: - max_index = 1 << len(value.index) - max_elem = len(value.elems) - raise _LegalizeValue(value.index, range(min(max_index, max_elem)), value.src_loc) - - -class _RHSValueCompiler(_ValueCompiler): - operator_map = { - (1, "~"): "$not", - (1, "-"): "$neg", - (1, "b"): "$reduce_bool", - (1, "r|"): "$reduce_or", - (1, "r&"): "$reduce_and", - (1, "r^"): "$reduce_xor", - (2, "+"): "$add", - (2, "-"): "$sub", - (2, "*"): "$mul", - (2, "//"): "$div", - (2, "%"): "$mod", - (2, "**"): "$pow", - (2, "<<"): "$sshl", - (2, ">>"): "$sshr", - (2, "&"): "$and", - (2, "^"): "$xor", - (2, "|"): "$or", - (2, "=="): "$eq", - (2, "!="): "$ne", - (2, "<"): "$lt", - (2, "<="): "$le", - (2, ">"): "$gt", - (2, ">="): "$ge", - (3, "m"): "$mux", - } - - def on_value(self, value): - return super().on_value(self.s.expand(value)) - - def on_Const(self, value): - return _const(value) - - def on_AnyConst(self, value): - if value in self.s.anys: - return self.s.anys[value] - - res_bits, res_sign = value.shape() - res = self.s.rtlil.wire(width=res_bits, src=_src(value.src_loc)) - self.s.rtlil.cell("$anyconst", ports={ - "\\Y": res, - }, params={ - "WIDTH": res_bits, 
- }, src=_src(value.src_loc)) - self.s.anys[value] = res - return res - - def on_AnySeq(self, value): - if value in self.s.anys: - return self.s.anys[value] - - res_bits, res_sign = value.shape() - res = self.s.rtlil.wire(width=res_bits, src=_src(value.src_loc)) - self.s.rtlil.cell("$anyseq", ports={ - "\\Y": res, - }, params={ - "WIDTH": res_bits, - }, src=_src(value.src_loc)) - self.s.anys[value] = res - return res - - def on_Signal(self, value): - wire_curr, wire_next = self.s.resolve(value) - return wire_curr - - def on_Operator_unary(self, value): - arg, = value.operands - if value.operator in ("u", "s"): - # These operators don't change the bit pattern, only its interpretation. - return self(arg) - - arg_bits, arg_sign = arg.shape() - res_bits, res_sign = value.shape() - res = self.s.rtlil.wire(width=res_bits, src=_src(value.src_loc)) - self.s.rtlil.cell(self.operator_map[(1, value.operator)], ports={ - "\\A": self(arg), - "\\Y": res, - }, params={ - "A_SIGNED": arg_sign, - "A_WIDTH": arg_bits, - "Y_WIDTH": res_bits, - }, src=_src(value.src_loc)) - return res - - def match_shape(self, value, new_bits, new_sign): - if isinstance(value, ast.Const): - return self(ast.Const(value.value, ast.Shape(new_bits, new_sign))) - - value_bits, value_sign = value.shape() - if new_bits <= value_bits: - return self(ast.Slice(value, 0, new_bits)) - - res = self.s.rtlil.wire(width=new_bits, src=_src(value.src_loc)) - self.s.rtlil.cell("$pos", ports={ - "\\A": self(value), - "\\Y": res, - }, params={ - "A_SIGNED": value_sign, - "A_WIDTH": value_bits, - "Y_WIDTH": new_bits, - }, src=_src(value.src_loc)) - return res - - def on_Operator_binary(self, value): - lhs, rhs = value.operands - lhs_bits, lhs_sign = lhs.shape() - rhs_bits, rhs_sign = rhs.shape() - if lhs_sign == rhs_sign or value.operator in ("<<", ">>", "**"): - lhs_wire = self(lhs) - rhs_wire = self(rhs) - else: - lhs_sign = rhs_sign = True - lhs_bits = rhs_bits = max(lhs_bits, rhs_bits) - lhs_wire = 
self.match_shape(lhs, lhs_bits, lhs_sign) - rhs_wire = self.match_shape(rhs, rhs_bits, rhs_sign) - res_bits, res_sign = value.shape() - res = self.s.rtlil.wire(width=res_bits, src=_src(value.src_loc)) - self.s.rtlil.cell(self.operator_map[(2, value.operator)], ports={ - "\\A": lhs_wire, - "\\B": rhs_wire, - "\\Y": res, - }, params={ - "A_SIGNED": lhs_sign, - "A_WIDTH": lhs_bits, - "B_SIGNED": rhs_sign, - "B_WIDTH": rhs_bits, - "Y_WIDTH": res_bits, - }, src=_src(value.src_loc)) - if value.operator in ("//", "%"): - # RTLIL leaves division by zero undefined, but we require it to return zero. - divmod_res = res - res = self.s.rtlil.wire(width=res_bits, src=_src(value.src_loc)) - self.s.rtlil.cell("$mux", ports={ - "\\A": divmod_res, - "\\B": self(ast.Const(0, ast.Shape(res_bits, res_sign))), - "\\S": self(rhs == 0), - "\\Y": res, - }, params={ - "WIDTH": res_bits - }, src=_src(value.src_loc)) - return res - - def on_Operator_mux(self, value): - sel, val1, val0 = value.operands - if len(sel) != 1: - sel = sel.bool() - val1_bits, val1_sign = val1.shape() - val0_bits, val0_sign = val0.shape() - res_bits, res_sign = value.shape() - val1_bits = val0_bits = res_bits = max(val1_bits, val0_bits, res_bits) - val1_wire = self.match_shape(val1, val1_bits, val1_sign) - val0_wire = self.match_shape(val0, val0_bits, val0_sign) - res = self.s.rtlil.wire(width=res_bits, src=_src(value.src_loc)) - self.s.rtlil.cell("$mux", ports={ - "\\A": val0_wire, - "\\B": val1_wire, - "\\S": self(sel), - "\\Y": res, - }, params={ - "WIDTH": res_bits - }, src=_src(value.src_loc)) - return res - - def on_Operator(self, value): - if len(value.operands) == 1: - return self.on_Operator_unary(value) - elif len(value.operands) == 2: - return self.on_Operator_binary(value) - elif len(value.operands) == 3: - assert value.operator == "m" - return self.on_Operator_mux(value) - else: - raise TypeError # :nocov: - - def _prepare_value_for_Slice(self, value): - if isinstance(value, (ast.Signal, ast.Slice, 
ast.Cat)): - sigspec = self(value) - else: - sigspec = self.s.rtlil.wire(len(value), src=_src(value.src_loc)) - self.s.rtlil.connect(sigspec, self(value)) - return sigspec - - def on_Part(self, value): - lhs, rhs = value.value, value.offset - if value.stride != 1: - rhs *= value.stride - lhs_bits, lhs_sign = lhs.shape() - rhs_bits, rhs_sign = rhs.shape() - res_bits, res_sign = value.shape() - res = self.s.rtlil.wire(width=res_bits, src=_src(value.src_loc)) - # Note: Verilog's x[o+:w] construct produces a $shiftx cell, not a $shift cell. - # However, nMigen's semantics defines the out-of-range bits to be zero, so it is correct - # to use a $shift cell here instead, even though it produces less idiomatic Verilog. - self.s.rtlil.cell("$shift", ports={ - "\\A": self(lhs), - "\\B": self(rhs), - "\\Y": res, - }, params={ - "A_SIGNED": lhs_sign, - "A_WIDTH": lhs_bits, - "B_SIGNED": rhs_sign, - "B_WIDTH": rhs_bits, - "Y_WIDTH": res_bits, - }, src=_src(value.src_loc)) - return res - - def on_Repl(self, value): - return "{{ {} }}".format(" ".join(self(value.value) for _ in range(value.count))) - - -class _LHSValueCompiler(_ValueCompiler): - def on_Const(self, value): - raise TypeError # :nocov: - - def on_AnyConst(self, value): - raise TypeError # :nocov: - - def on_AnySeq(self, value): - raise TypeError # :nocov: - - def on_Operator(self, value): - raise TypeError # :nocov: - - def match_shape(self, value, new_bits, new_sign): - value_bits, value_sign = value.shape() - if new_bits == value_bits: - return self(value) - elif new_bits < value_bits: - return self(ast.Slice(value, 0, new_bits)) - else: # new_bits > value_bits - dummy_bits = new_bits - value_bits - dummy_wire = self.s.rtlil.wire(dummy_bits) - return "{{ {} {} }}".format(dummy_wire, self(value)) - - def on_Signal(self, value): - if value not in self.s.driven: - raise ValueError("No LHS wire for non-driven signal {}".format(repr(value))) - wire_curr, wire_next = self.s.resolve(value) - return wire_next or wire_curr 
- - def _prepare_value_for_Slice(self, value): - assert isinstance(value, (ast.Signal, ast.Slice, ast.Cat)) - return self(value) - - def on_Part(self, value): - offset = self.s.expand(value.offset) - if isinstance(offset, ast.Const): - start = offset.value * value.stride - stop = start + value.width - slice = self(ast.Slice(value.value, start, min(len(value.value), stop))) - if len(value.value) >= stop: - return slice - else: - dummy_wire = self.s.rtlil.wire(stop - len(value.value)) - return "{{ {} {} }}".format(dummy_wire, slice) - else: - # Only so many possible parts. The amount of branches is exponential; if value.offset - # is large (e.g. 32-bit wide), trying to naively legalize it is likely to exhaust - # system resources. - max_branches = len(value.value) // value.stride + 1 - raise _LegalizeValue(value.offset, - range(1 << len(value.offset))[:max_branches], - value.src_loc) - - def on_Repl(self, value): - raise TypeError # :nocov: - - -class _StatementCompiler(xfrm.StatementVisitor): - def __init__(self, state, rhs_compiler, lhs_compiler): - self.state = state - self.rhs_compiler = rhs_compiler - self.lhs_compiler = lhs_compiler - - self._case = None - self._test_cache = {} - self._has_rhs = False - self._wrap_assign = False - - @contextmanager - def case(self, switch, values, attrs={}, src=""): - try: - old_case = self._case - with switch.case(*values, attrs=attrs, src=src) as self._case: - yield - finally: - self._case = old_case - - def _check_rhs(self, value): - if self._has_rhs or next(iter(value._rhs_signals()), None) is not None: - self._has_rhs = True - - def on_Assign(self, stmt): - self._check_rhs(stmt.rhs) - - lhs_bits, lhs_sign = stmt.lhs.shape() - rhs_bits, rhs_sign = stmt.rhs.shape() - if lhs_bits == rhs_bits: - rhs_sigspec = self.rhs_compiler(stmt.rhs) - else: - # In RTLIL, LHS and RHS of assignment must have exactly same width. 
- rhs_sigspec = self.rhs_compiler.match_shape( - stmt.rhs, lhs_bits, lhs_sign) - if self._wrap_assign: - # In RTLIL, all assigns are logically sequenced before all switches, even if they are - # interleaved in the source. In nMigen, the source ordering is used. To handle this - # mismatch, we wrap all assigns following a switch in a dummy switch. - with self._case.switch("{ }") as wrap_switch: - with wrap_switch.case() as wrap_case: - wrap_case.assign(self.lhs_compiler(stmt.lhs), rhs_sigspec) - else: - self._case.assign(self.lhs_compiler(stmt.lhs), rhs_sigspec) - - def on_property(self, stmt): - self(stmt._check.eq(stmt.test)) - self(stmt._en.eq(1)) - - en_wire = self.rhs_compiler(stmt._en) - check_wire = self.rhs_compiler(stmt._check) - self.state.rtlil.cell("$" + stmt._kind, ports={ - "\\A": check_wire, - "\\EN": en_wire, - }, src=_src(stmt.src_loc)) - - on_Assert = on_property - on_Assume = on_property - on_Cover = on_property - - def on_Switch(self, stmt): - self._check_rhs(stmt.test) - - if not self.state.expansions: - # We repeatedly translate the same switches over and over (see the LHSGroupAnalyzer - # related code below), and translating the switch test only once helps readability. - if stmt not in self._test_cache: - self._test_cache[stmt] = self.rhs_compiler(stmt.test) - test_sigspec = self._test_cache[stmt] - else: - # However, if the switch test contains an illegal value, then it may not be cached - # (since the illegal value will be repeatedly replaced with different constants), so - # don't cache anything in that case. 
- test_sigspec = self.rhs_compiler(stmt.test) - - with self._case.switch(test_sigspec, src=_src(stmt.src_loc)) as switch: - for values, stmts in stmt.cases.items(): - case_attrs = {} - if values in stmt.case_src_locs: - case_attrs["src"] = _src(stmt.case_src_locs[values]) - if isinstance(stmt.test, ast.Signal) and stmt.test.decoder: - decoded_values = [] - for value in values: - if "-" in value: - decoded_values.append("") - else: - decoded_values.append(stmt.test.decoder(int(value, 2))) - case_attrs["nmigen.decoding"] = "|".join(decoded_values) - with self.case(switch, values, attrs=case_attrs): - self._wrap_assign = False - self.on_statements(stmts) - self._wrap_assign = True - - def on_statement(self, stmt): - try: - super().on_statement(stmt) - except _LegalizeValue as legalize: - with self._case.switch(self.rhs_compiler(legalize.value), - src=_src(legalize.src_loc)) as switch: - shape = legalize.value.shape() - tests = ["{:0{}b}".format(v, shape.width) for v in legalize.branches] - if tests: - tests[-1] = "-" * shape.width - for branch, test in zip(legalize.branches, tests): - with self.case(switch, (test,)): - self._wrap_assign = False - branch_value = ast.Const(branch, shape) - with self.state.expand_to(legalize.value, branch_value): - self.on_statement(stmt) - self._wrap_assign = True - - def on_statements(self, stmts): - for stmt in stmts: - self.on_statement(stmt) - - -def _convert_fragment(builder, fragment, name_map, hierarchy): - if isinstance(fragment, ir.Instance): - port_map = OrderedDict() - for port_name, (value, dir) in fragment.named_ports.items(): - port_map["\\{}".format(port_name)] = value - - if fragment.type[0] == "$": - return fragment.type, port_map - else: - return "\\{}".format(fragment.type), port_map - - module_name = hierarchy[-1] or "anonymous" - module_attrs = OrderedDict() - if len(hierarchy) == 1: - module_attrs["top"] = 1 - module_attrs["nmigen.hierarchy"] = ".".join(name or "anonymous" for name in hierarchy) - - with 
builder.module(module_name, attrs=module_attrs) as module: - compiler_state = _ValueCompilerState(module) - rhs_compiler = _RHSValueCompiler(compiler_state) - lhs_compiler = _LHSValueCompiler(compiler_state) - stmt_compiler = _StatementCompiler(compiler_state, rhs_compiler, lhs_compiler) - - verilog_trigger = None - verilog_trigger_sync_emitted = False - - # If the fragment is completely empty, add a dummy wire to it, or Yosys will interpret - # it as a black box by default (when read as Verilog). - if not fragment.ports and not fragment.statements and not fragment.subfragments: - module.wire(1, name="$empty_module_filler") - - # Register all signals driven in the current fragment. This must be done first, as it - # affects further codegen; e.g. whether \sig$next signals will be generated and used. - for domain, signal in fragment.iter_drivers(): - compiler_state.add_driven(signal, sync=domain is not None) - - # Transform all signals used as ports in the current fragment eagerly and outside of - # any hierarchy, to make sure they get sensible (non-prefixed) names. - for signal in fragment.ports: - compiler_state.add_port(signal, fragment.ports[signal]) - compiler_state.resolve_curr(signal) - - # Transform all clocks clocks and resets eagerly and outside of any hierarchy, to make - # sure they get sensible (non-prefixed) names. This does not affect semantics. - for domain, _ in fragment.iter_sync(): - cd = fragment.domains[domain] - compiler_state.resolve_curr(cd.clk) - if cd.rst is not None: - compiler_state.resolve_curr(cd.rst) - - # Transform all subfragments to their respective cells. Transforming signals connected - # to their ports into wires eagerly makes sure they get sensible (prefixed with submodule - # name) names. 
- memories = OrderedDict() - for subfragment, sub_name in fragment.subfragments: - if sub_name is None: - sub_name = module.anonymous() - - sub_params = OrderedDict() - if hasattr(subfragment, "parameters"): - for param_name, param_value in subfragment.parameters.items(): - if isinstance(param_value, mem.Memory): - memory = param_value - if memory not in memories: - memories[memory] = module.memory(width=memory.width, size=memory.depth, - name=memory.name, attrs=memory.attrs) - addr_bits = bits_for(memory.depth) - data_parts = [] - data_mask = (1 << memory.width) - 1 - for addr in range(memory.depth): - if addr < len(memory.init): - data = memory.init[addr] & data_mask - else: - data = 0 - data_parts.append("{:0{}b}".format(data, memory.width)) - module.cell("$meminit", ports={ - "\\ADDR": rhs_compiler(ast.Const(0, addr_bits)), - "\\DATA": "{}'".format(memory.width * memory.depth) + - "".join(reversed(data_parts)), - }, params={ - "MEMID": memories[memory], - "ABITS": addr_bits, - "WIDTH": memory.width, - "WORDS": memory.depth, - "PRIORITY": 0, - }) - - param_value = memories[memory] - - sub_params[param_name] = param_value - - sub_type, sub_port_map = \ - _convert_fragment(builder, subfragment, name_map, - hierarchy=hierarchy + (sub_name,)) - - sub_ports = OrderedDict() - for port, value in sub_port_map.items(): - if not isinstance(subfragment, ir.Instance): - for signal in value._rhs_signals(): - compiler_state.resolve_curr(signal, prefix=sub_name) - if len(value) > 0: - sub_ports[port] = rhs_compiler(value) - - module.cell(sub_type, name=sub_name, ports=sub_ports, params=sub_params, - attrs=subfragment.attrs) - - # If we emit all of our combinatorial logic into a single RTLIL process, Verilog - # simulators will break horribly, because Yosys write_verilog transforms RTLIL processes - # into always @* blocks with blocking assignment, and that does not create delta cycles. 
- # - # Therefore, we translate the fragment as many times as there are independent groups - # of signals (a group is a transitive closure of signals that appear together on LHS), - # splitting them into many RTLIL (and thus Verilog) processes. - lhs_grouper = xfrm.LHSGroupAnalyzer() - lhs_grouper.on_statements(fragment.statements) - - for group, group_signals in lhs_grouper.groups().items(): - lhs_group_filter = xfrm.LHSGroupFilter(group_signals) - group_stmts = lhs_group_filter(fragment.statements) - - with module.process(name="$group_{}".format(group)) as process: - with process.case() as case: - # For every signal in comb domain, assign \sig$next to the reset value. - # For every signal in sync domains, assign \sig$next to the current - # value (\sig). - for domain, signal in fragment.iter_drivers(): - if signal not in group_signals: - continue - if domain is None: - prev_value = ast.Const(signal.reset, signal.width) - else: - prev_value = signal - case.assign(lhs_compiler(signal), rhs_compiler(prev_value)) - - # Convert statements into decision trees. - stmt_compiler._case = case - stmt_compiler._has_rhs = False - stmt_compiler._wrap_assign = False - stmt_compiler(group_stmts) - - # Verilog `always @*` blocks will not run if `*` does not match anything, i.e. - # if the implicit sensitivity list is empty. We check this while translating, - # by looking for any signals on RHS. If there aren't any, we add some logic - # whose only purpose is to trigger Verilog simulators when it converts - # through RTLIL and to Verilog, by populating the sensitivity list. - # - # Unfortunately, while this workaround allows true (event-driven) Verilog - # simulators to work properly, and is universally ignored by synthesizers, - # Verilator rejects it. - # - # Yosys >=0.9+3468 emits a better workaround on its own, so this code can be - # removed completely once support for Yosys 0.9 is dropped. 
- if not stmt_compiler._has_rhs: - if verilog_trigger is None: - verilog_trigger = \ - module.wire(1, name="$verilog_initial_trigger") - case.assign(verilog_trigger, verilog_trigger) - - # For every signal in the sync domain, assign \sig's initial value (which will - # end up as the \init reg attribute) to the reset value. - with process.sync("init") as sync: - for domain, signal in fragment.iter_sync(): - if signal not in group_signals: - continue - wire_curr, wire_next = compiler_state.resolve(signal) - sync.update(wire_curr, rhs_compiler(ast.Const(signal.reset, signal.width))) - - # The Verilog simulator trigger needs to change at time 0, so if we haven't - # yet done that in some process, do it. - if verilog_trigger and not verilog_trigger_sync_emitted: - sync.update(verilog_trigger, "1'0") - verilog_trigger_sync_emitted = True - - # For every signal in every sync domain, assign \sig to \sig$next. The sensitivity - # list, however, differs between domains: for domains with sync reset, it is - # `[pos|neg]edge clk`, for sync domains with async reset it is `[pos|neg]edge clk - # or posedge rst`. - for domain, signals in fragment.drivers.items(): - if domain is None: - continue - - signals = signals & group_signals - if not signals: - continue - - cd = fragment.domains[domain] - - triggers = [] - triggers.append((cd.clk_edge + "edge", compiler_state.resolve_curr(cd.clk))) - if cd.async_reset: - triggers.append(("posedge", compiler_state.resolve_curr(cd.rst))) - - for trigger in triggers: - with process.sync(*trigger) as sync: - for signal in signals: - wire_curr, wire_next = compiler_state.resolve(signal) - sync.update(wire_curr, wire_next) - - # Any signals that are used but neither driven nor connected to an input port always - # assume their reset values. We need to assign the reset value explicitly, since only - # driven sync signals are handled by the logic above. 
- # - # Because this assignment is done at a late stage, a single Signal object can get assigned - # many times, once in each module it is used. This is a deliberate decision; the possible - # alternatives are to add ports for undriven signals (which requires choosing one module - # to drive it to reset value arbitrarily) or to replace them with their reset value (which - # removes valuable source location information). - driven = ast.SignalSet() - for domain, signals in fragment.iter_drivers(): - driven.update(flatten(signal._lhs_signals() for signal in signals)) - driven.update(fragment.iter_ports(dir="i")) - driven.update(fragment.iter_ports(dir="io")) - for subfragment, sub_name in fragment.subfragments: - driven.update(subfragment.iter_ports(dir="o")) - driven.update(subfragment.iter_ports(dir="io")) - - for wire in compiler_state.wires: - if wire in driven: - continue - wire_curr, _ = compiler_state.wires[wire] - module.connect(wire_curr, rhs_compiler(ast.Const(wire.reset, wire.width))) - - # Collect the names we've given to our ports in RTLIL, and correlate these with the signals - # represented by these ports. If we are a submodule, this will be necessary to create a cell - # for us in the parent module. - port_map = OrderedDict() - for signal in fragment.ports: - port_map[compiler_state.resolve_curr(signal)] = signal - - # Finally, collect tha names we've given to each wire in RTLIL, and provide these to - # the caller, to allow manipulating them in the toolchain. 
- for signal in compiler_state.wires: - wire_name = compiler_state.resolve_curr(signal) - if wire_name.startswith("\\"): - wire_name = wire_name[1:] - name_map[signal] = hierarchy + (wire_name,) - - return module.name, port_map - - -def convert_fragment(fragment, name="top"): - assert isinstance(fragment, ir.Fragment) - builder = _Builder() - name_map = ast.SignalDict() - _convert_fragment(builder, fragment, name_map, hierarchy=(name,)) - return str(builder), name_map - - -def convert(elaboratable, name="top", platform=None, **kwargs): - fragment = ir.Fragment.get(elaboratable, platform).prepare(**kwargs) - il_text, name_map = convert_fragment(fragment, name) - return il_text +import warnings +warnings.warn("instead of nmigen.back.rtlil, use amaranth.back.rtlil", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/back/verilog.py b/nmigen/back/verilog.py index 181a765..0ba9eca 100644 --- a/nmigen/back/verilog.py +++ b/nmigen/back/verilog.py @@ -1,61 +1,7 @@ -from .._toolchain.yosys import * -from . import rtlil +from amaranth.back.verilog import * +from amaranth.back.verilog import __all__ -__all__ = ["YosysError", "convert", "convert_fragment"] - - -def _convert_rtlil_text(rtlil_text, *, strip_internal_attrs=False, write_verilog_opts=()): - # this version requirement needs to be synchronized with the one in setup.py! - yosys = find_yosys(lambda ver: ver >= (0, 9)) - yosys_version = yosys.version() - - script = [] - script.append("read_ilang <= (0, 9, 3468): - # Yosys >=0.9+3468 (since commit 128522f1) emits the workaround for the `always @*` - # initial scheduling issue on its own. - script.append("delete w:$verilog_initial_trigger") - - if yosys_version >= (0, 9, 3527): - # Yosys >=0.9+3527 (since commit 656ee70f) supports the `-nomux` option for the `proc` - # script pass. Because the individual `proc_*` passes are not a stable interface, - # `proc -nomux` is used instead, if available. 
- script.append("proc -nomux") - else: - # On earlier versions, use individual `proc_*` passes; this is a known range of Yosys - # versions and we know it's compatible with what nMigen does. - script.append("proc_init") - script.append("proc_arst") - script.append("proc_dff") - script.append("proc_clean") - script.append("memory_collect") - - if strip_internal_attrs: - attr_map = [] - attr_map.append("-remove generator") - attr_map.append("-remove top") - attr_map.append("-remove src") - attr_map.append("-remove nmigen.hierarchy") - attr_map.append("-remove nmigen.decoding") - script.append("attrmap {}".format(" ".join(attr_map))) - script.append("attrmap -modattr {}".format(" ".join(attr_map))) - - script.append("write_verilog -norename {}".format(" ".join(write_verilog_opts))) - - return yosys.run(["-q", "-"], "\n".join(script), - # At the moment, Yosys always shows a warning indicating that not all processes can be - # translated to Verilog. We carefully emit only the processes that *can* be translated, and - # squash this warning. Once Yosys' write_verilog pass is fixed, we should remove this. 
- ignore_warnings=True) - - -def convert_fragment(*args, strip_internal_attrs=False, **kwargs): - rtlil_text, name_map = rtlil.convert_fragment(*args, **kwargs) - return _convert_rtlil_text(rtlil_text, strip_internal_attrs=strip_internal_attrs), name_map - - -def convert(*args, strip_internal_attrs=False, **kwargs): - rtlil_text = rtlil.convert(*args, **kwargs) - return _convert_rtlil_text(rtlil_text, strip_internal_attrs=strip_internal_attrs) +import warnings +warnings.warn("instead of nmigen.back.verilog, use amaranth.back.verilog", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/build/__init__.py b/nmigen/build/__init__.py index c4bc9f3..96e9358 100644 --- a/nmigen/build/__init__.py +++ b/nmigen/build/__init__.py @@ -1,3 +1,6 @@ -from .dsl import * -from .res import ResourceError -from .plat import * +from amaranth.build import * + + +import warnings +warnings.warn("instead of nmigen.build, use amaranth.build", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/build/dsl.py b/nmigen/build/dsl.py index 3b445f6..a68be3c 100644 --- a/nmigen/build/dsl.py +++ b/nmigen/build/dsl.py @@ -1,256 +1,7 @@ -from collections import OrderedDict +from amaranth.build.dsl import * +from amaranth.build.dsl import __all__ -__all__ = ["Pins", "PinsN", "DiffPairs", "DiffPairsN", - "Attrs", "Clock", "Subsignal", "Resource", "Connector"] - - -class Pins: - def __init__(self, names, *, dir="io", invert=False, conn=None, assert_width=None): - if not isinstance(names, str): - raise TypeError("Names must be a whitespace-separated string, not {!r}" - .format(names)) - names = names.split() - - if conn is not None: - conn_name, conn_number = conn - if not (isinstance(conn_name, str) and isinstance(conn_number, (int, str))): - raise TypeError("Connector must be None or a pair of string (connector name) and " - "integer/string (connector number), not {!r}" - .format(conn)) - names = ["{}_{}:{}".format(conn_name, conn_number, name) for name in names] - - if dir not in ("i", "o", 
"io", "oe"): - raise TypeError("Direction must be one of \"i\", \"o\", \"oe\", or \"io\", not {!r}" - .format(dir)) - - if assert_width is not None and len(names) != assert_width: - raise AssertionError("{} names are specified ({}), but {} names are expected" - .format(len(names), " ".join(names), assert_width)) - - self.names = names - self.dir = dir - self.invert = bool(invert) - - def __len__(self): - return len(self.names) - - def __iter__(self): - return iter(self.names) - - def map_names(self, mapping, resource): - mapped_names = [] - for name in self.names: - while ":" in name: - if name not in mapping: - raise NameError("Resource {!r} refers to nonexistent connector pin {}" - .format(resource, name)) - name = mapping[name] - mapped_names.append(name) - return mapped_names - - def __repr__(self): - return "(pins{} {} {})".format("-n" if self.invert else "", - self.dir, " ".join(self.names)) - - -def PinsN(*args, **kwargs): - return Pins(*args, invert=True, **kwargs) - - -class DiffPairs: - def __init__(self, p, n, *, dir="io", invert=False, conn=None, assert_width=None): - self.p = Pins(p, dir=dir, conn=conn, assert_width=assert_width) - self.n = Pins(n, dir=dir, conn=conn, assert_width=assert_width) - - if len(self.p.names) != len(self.n.names): - raise TypeError("Positive and negative pins must have the same width, but {!r} " - "and {!r} do not" - .format(self.p, self.n)) - - self.dir = dir - self.invert = bool(invert) - - def __len__(self): - return len(self.p.names) - - def __iter__(self): - return zip(self.p.names, self.n.names) - - def __repr__(self): - return "(diffpairs{} {} (p {}) (n {}))".format("-n" if self.invert else "", - self.dir, " ".join(self.p.names), " ".join(self.n.names)) - - -def DiffPairsN(*args, **kwargs): - return DiffPairs(*args, invert=True, **kwargs) - - -class Attrs(OrderedDict): - def __init__(self, **attrs): - for key, value in attrs.items(): - if not (value is None or isinstance(value, (str, int)) or hasattr(value, 
"__call__")): - raise TypeError("Value of attribute {} must be None, int, str, or callable, " - "not {!r}" - .format(key, value)) - - super().__init__(**attrs) - - def __repr__(self): - items = [] - for key, value in self.items(): - if value is None: - items.append("!" + key) - else: - items.append(key + "=" + repr(value)) - return "(attrs {})".format(" ".join(items)) - - -class Clock: - def __init__(self, frequency): - if not isinstance(frequency, (float, int)): - raise TypeError("Clock frequency must be a number") - - self.frequency = float(frequency) - - @property - def period(self): - return 1 / self.frequency - - def __repr__(self): - return "(clock {})".format(self.frequency) - - -class Subsignal: - def __init__(self, name, *args): - self.name = name - self.ios = [] - self.attrs = Attrs() - self.clock = None - - if not args: - raise ValueError("Missing I/O constraints") - for arg in args: - if isinstance(arg, (Pins, DiffPairs)): - if not self.ios: - self.ios.append(arg) - else: - raise TypeError("Pins and DiffPairs are incompatible with other location or " - "subsignal constraints, but {!r} appears after {!r}" - .format(arg, self.ios[-1])) - elif isinstance(arg, Subsignal): - if not self.ios or isinstance(self.ios[-1], Subsignal): - self.ios.append(arg) - else: - raise TypeError("Subsignal is incompatible with location constraints, but " - "{!r} appears after {!r}" - .format(arg, self.ios[-1])) - elif isinstance(arg, Attrs): - self.attrs.update(arg) - elif isinstance(arg, Clock): - if self.ios and isinstance(self.ios[-1], (Pins, DiffPairs)): - if self.clock is None: - self.clock = arg - else: - raise ValueError("Clock constraint can be applied only once") - else: - raise TypeError("Clock constraint can only be applied to Pins or DiffPairs, " - "not {!r}" - .format(self.ios[-1])) - else: - raise TypeError("Constraint must be one of Pins, DiffPairs, Subsignal, Attrs, " - "or Clock, not {!r}" - .format(arg)) - - def _content_repr(self): - parts = [] - for io in 
self.ios: - parts.append(repr(io)) - if self.clock is not None: - parts.append(repr(self.clock)) - if self.attrs: - parts.append(repr(self.attrs)) - return " ".join(parts) - - def __repr__(self): - return "(subsignal {} {})".format(self.name, self._content_repr()) - - -class Resource(Subsignal): - @classmethod - def family(cls, name_or_number, number=None, *, ios, default_name, name_suffix=""): - # This constructor accepts two different forms: - # 1. Number-only form: - # Resource.family(0, default_name="name", ios=[Pins("A0 A1")]) - # 2. Name-and-number (name override) form: - # Resource.family("override", 0, default_name="name", ios=...) - # This makes it easier to build abstractions for resources, e.g. an SPIResource abstraction - # could simply delegate to `Resource.family(*args, default_name="spi", ios=ios)`. - # The name_suffix argument is meant to support creating resources with - # similar names, such as spi_flash, spi_flash_2x, etc. - if name_suffix: # Only add "_" if we actually have a suffix. 
- name_suffix = "_" + name_suffix - - if number is None: # name_or_number is number - return cls(default_name + name_suffix, name_or_number, *ios) - else: # name_or_number is name - return cls(name_or_number + name_suffix, number, *ios) - - def __init__(self, name, number, *args): - super().__init__(name, *args) - - self.number = number - - def __repr__(self): - return "(resource {} {} {})".format(self.name, self.number, self._content_repr()) - - -class Connector: - def __init__(self, name, number, io, *, conn=None): - self.name = name - self.number = number - mapping = OrderedDict() - - if isinstance(io, dict): - for conn_pin, plat_pin in io.items(): - if not isinstance(conn_pin, str): - raise TypeError("Connector pin name must be a string, not {!r}" - .format(conn_pin)) - if not isinstance(plat_pin, str): - raise TypeError("Platform pin name must be a string, not {!r}" - .format(plat_pin)) - mapping[conn_pin] = plat_pin - - elif isinstance(io, str): - for conn_pin, plat_pin in enumerate(io.split(), start=1): - if plat_pin == "-": - continue - - mapping[str(conn_pin)] = plat_pin - else: - raise TypeError("Connector I/Os must be a dictionary or a string, not {!r}" - .format(io)) - - if conn is not None: - conn_name, conn_number = conn - if not (isinstance(conn_name, str) and isinstance(conn_number, (int, str))): - raise TypeError("Connector must be None or a pair of string (connector name) and " - "integer/string (connector number), not {!r}" - .format(conn)) - - for conn_pin, plat_pin in mapping.items(): - mapping[conn_pin] = "{}_{}:{}".format(conn_name, conn_number, plat_pin) - - self.mapping = mapping - - def __repr__(self): - return "(connector {} {} {})".format(self.name, self.number, - " ".join("{}=>{}".format(conn, plat) - for conn, plat in self.mapping.items())) - - def __len__(self): - return len(self.mapping) - - def __iter__(self): - for conn_pin, plat_pin in self.mapping.items(): - yield "{}_{}:{}".format(self.name, self.number, conn_pin), plat_pin 
+import warnings +warnings.warn("instead of nmigen.build.dsl, use amaranth.build.dsl", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/build/plat.py b/nmigen/build/plat.py index c1d8fc6..f4b7cf3 100644 --- a/nmigen/build/plat.py +++ b/nmigen/build/plat.py @@ -1,444 +1,7 @@ -from collections import OrderedDict -from abc import ABCMeta, abstractmethod, abstractproperty -import os -import textwrap -import re -import jinja2 +from amaranth.build.plat import * +from amaranth.build.plat import __all__ -from .. import __version__ -from .._toolchain import * -from ..hdl import * -from ..hdl.xfrm import SampleLowerer, DomainLowerer -from ..lib.cdc import ResetSynchronizer -from ..back import rtlil, verilog -from .res import * -from .run import * - -__all__ = ["Platform", "TemplatedPlatform"] - - -class Platform(ResourceManager, metaclass=ABCMeta): - resources = abstractproperty() - connectors = abstractproperty() - default_clk = None - default_rst = None - required_tools = abstractproperty() - - def __init__(self): - super().__init__(self.resources, self.connectors) - - self.extra_files = OrderedDict() - - self._prepared = False - - @property - def default_clk_constraint(self): - if self.default_clk is None: - raise AttributeError("Platform '{}' does not define a default clock" - .format(type(self).__name__)) - return self.lookup(self.default_clk).clock - - @property - def default_clk_frequency(self): - constraint = self.default_clk_constraint - if constraint is None: - raise AttributeError("Platform '{}' does not constrain its default clock" - .format(type(self).__name__)) - return constraint.frequency - - def add_file(self, filename, content): - if not isinstance(filename, str): - raise TypeError("File name must be a string, not {!r}" - .format(filename)) - if hasattr(content, "read"): - content = content.read() - elif not isinstance(content, (str, bytes)): - raise TypeError("File contents must be str, bytes, or a file-like object, not {!r}" - .format(content)) - 
if filename in self.extra_files: - if self.extra_files[filename] != content: - raise ValueError("File {!r} already exists" - .format(filename)) - else: - self.extra_files[filename] = content - - def iter_files(self, *suffixes): - for filename in self.extra_files: - if filename.endswith(suffixes): - yield filename - - @property - def _toolchain_env_var(self): - return f"NMIGEN_ENV_{self.toolchain}" - - def build(self, elaboratable, name="top", - build_dir="build", do_build=True, - program_opts=None, do_program=False, - **kwargs): - # The following code performs a best-effort check for presence of required tools upfront, - # before performing any build actions, to provide a better diagnostic. It does not handle - # several corner cases: - # 1. `require_tool` does not source toolchain environment scripts, so if such a script - # is used, the check is skipped, and `execute_local()` may fail; - # 2. if the design is not built (do_build=False), most of the tools are not required and - # in fact might not be available if the design will be built manually with a different - # environment script specified, or on a different machine; however, Yosys is required - # by virtually every platform anyway, to provide debug Verilog output, and `prepare()` - # may fail. - # This is OK because even if `require_tool` succeeds, the toolchain might be broken anyway. - # The check only serves to catch common errors earlier. 
- if do_build and self._toolchain_env_var not in os.environ: - for tool in self.required_tools: - require_tool(tool) - - plan = self.prepare(elaboratable, name, **kwargs) - if not do_build: - return plan - - products = plan.execute_local(build_dir) - if not do_program: - return products - - self.toolchain_program(products, name, **(program_opts or {})) - - def has_required_tools(self): - if self._toolchain_env_var in os.environ: - return True - return all(has_tool(name) for name in self.required_tools) - - def create_missing_domain(self, name): - # Simple instantiation of a clock domain driven directly by the board clock and reset. - # This implementation uses a single ResetSynchronizer to ensure that: - # * an external reset is definitely synchronized to the system clock; - # * release of power-on reset, which is inherently asynchronous, is synchronized to - # the system clock. - # Many device families provide advanced primitives for tackling reset. If these exist, - # they should be used instead. 
- if name == "sync" and self.default_clk is not None: - clk_i = self.request(self.default_clk).i - if self.default_rst is not None: - rst_i = self.request(self.default_rst).i - else: - rst_i = Const(0) - - m = Module() - m.domains += ClockDomain("sync") - m.d.comb += ClockSignal("sync").eq(clk_i) - m.submodules.reset_sync = ResetSynchronizer(rst_i, domain="sync") - return m - - def prepare(self, elaboratable, name="top", **kwargs): - assert not self._prepared - self._prepared = True - - fragment = Fragment.get(elaboratable, self) - fragment = SampleLowerer()(fragment) - fragment._propagate_domains(self.create_missing_domain, platform=self) - fragment = DomainLowerer()(fragment) - - def add_pin_fragment(pin, pin_fragment): - pin_fragment = Fragment.get(pin_fragment, self) - if not isinstance(pin_fragment, Instance): - pin_fragment.flatten = True - fragment.add_subfragment(pin_fragment, name="pin_{}".format(pin.name)) - - for pin, port, attrs, invert in self.iter_single_ended_pins(): - if pin.dir == "i": - add_pin_fragment(pin, self.get_input(pin, port, attrs, invert)) - if pin.dir == "o": - add_pin_fragment(pin, self.get_output(pin, port, attrs, invert)) - if pin.dir == "oe": - add_pin_fragment(pin, self.get_tristate(pin, port, attrs, invert)) - if pin.dir == "io": - add_pin_fragment(pin, self.get_input_output(pin, port, attrs, invert)) - - for pin, port, attrs, invert in self.iter_differential_pins(): - if pin.dir == "i": - add_pin_fragment(pin, self.get_diff_input(pin, port, attrs, invert)) - if pin.dir == "o": - add_pin_fragment(pin, self.get_diff_output(pin, port, attrs, invert)) - if pin.dir == "oe": - add_pin_fragment(pin, self.get_diff_tristate(pin, port, attrs, invert)) - if pin.dir == "io": - add_pin_fragment(pin, self.get_diff_input_output(pin, port, attrs, invert)) - - fragment._propagate_ports(ports=self.iter_ports(), all_undef_as_ports=False) - return self.toolchain_prepare(fragment, name, **kwargs) - - @abstractmethod - def toolchain_prepare(self, 
fragment, name, **kwargs): - """ - Convert the ``fragment`` and constraints recorded in this :class:`Platform` into - a :class:`BuildPlan`. - """ - raise NotImplementedError # :nocov: - - def toolchain_program(self, products, name, **kwargs): - """ - Extract bitstream for fragment ``name`` from ``products`` and download it to a target. - """ - raise NotImplementedError("Platform '{}' does not support programming" - .format(type(self).__name__)) - - def _check_feature(self, feature, pin, attrs, valid_xdrs, valid_attrs): - if len(valid_xdrs) == 0: - raise NotImplementedError("Platform '{}' does not support {}" - .format(type(self).__name__, feature)) - elif pin.xdr not in valid_xdrs: - raise NotImplementedError("Platform '{}' does not support {} for XDR {}" - .format(type(self).__name__, feature, pin.xdr)) - - if not valid_attrs and attrs: - raise NotImplementedError("Platform '{}' does not support attributes for {}" - .format(type(self).__name__, feature)) - - @staticmethod - def _invert_if(invert, value): - if invert: - return ~value - else: - return value - - def get_input(self, pin, port, attrs, invert): - self._check_feature("single-ended input", pin, attrs, - valid_xdrs=(0,), valid_attrs=None) - - m = Module() - m.d.comb += pin.i.eq(self._invert_if(invert, port)) - return m - - def get_output(self, pin, port, attrs, invert): - self._check_feature("single-ended output", pin, attrs, - valid_xdrs=(0,), valid_attrs=None) - - m = Module() - m.d.comb += port.eq(self._invert_if(invert, pin.o)) - return m - - def get_tristate(self, pin, port, attrs, invert): - self._check_feature("single-ended tristate", pin, attrs, - valid_xdrs=(0,), valid_attrs=None) - - m = Module() - m.submodules += Instance("$tribuf", - p_WIDTH=pin.width, - i_EN=pin.oe, - i_A=self._invert_if(invert, pin.o), - o_Y=port, - ) - return m - - def get_input_output(self, pin, port, attrs, invert): - self._check_feature("single-ended input/output", pin, attrs, - valid_xdrs=(0,), valid_attrs=None) - - m = 
Module() - m.submodules += Instance("$tribuf", - p_WIDTH=pin.width, - i_EN=pin.oe, - i_A=self._invert_if(invert, pin.o), - o_Y=port, - ) - m.d.comb += pin.i.eq(self._invert_if(invert, port)) - return m - - def get_diff_input(self, pin, port, attrs, invert): - self._check_feature("differential input", pin, attrs, - valid_xdrs=(), valid_attrs=None) - - def get_diff_output(self, pin, port, attrs, invert): - self._check_feature("differential output", pin, attrs, - valid_xdrs=(), valid_attrs=None) - - def get_diff_tristate(self, pin, port, attrs, invert): - self._check_feature("differential tristate", pin, attrs, - valid_xdrs=(), valid_attrs=None) - - def get_diff_input_output(self, pin, port, attrs, invert): - self._check_feature("differential input/output", pin, attrs, - valid_xdrs=(), valid_attrs=None) - - -class TemplatedPlatform(Platform): - toolchain = abstractproperty() - file_templates = abstractproperty() - command_templates = abstractproperty() - - build_script_templates = { - "build_{{name}}.sh": """ - # {{autogenerated}} - set -e{{verbose("x")}} - [ -n "${{platform._toolchain_env_var}}" ] && . "${{platform._toolchain_env_var}}" - {{emit_commands("sh")}} - """, - "build_{{name}}.bat": """ - @rem {{autogenerated}} - {{quiet("@echo off")}} - if defined {{platform._toolchain_env_var}} call %{{platform._toolchain_env_var}}% - {{emit_commands("bat")}} - """, - } - - def iter_clock_constraints(self): - for net_signal, port_signal, frequency in super().iter_clock_constraints(): - # Skip any clock constraints placed on signals that are never used in the design. - # Otherwise, it will cause a crash in the vendor platform if it supports clock - # constraints on non-port nets. - if net_signal not in self._name_map: - continue - yield net_signal, port_signal, frequency - - def toolchain_prepare(self, fragment, name, **kwargs): - # Restrict the name of the design to a strict alphanumeric character set. 
Platforms will - # interpolate the name of the design in many different contexts: filesystem paths, Python - # scripts, Tcl scripts, ad-hoc constraint files, and so on. It is not practical to add - # escaping code that handles every one of their edge cases, so make sure we never hit them - # in the first place. - invalid_char = re.match(r"[^A-Za-z0-9_]", name) - if invalid_char: - raise ValueError("Design name {!r} contains invalid character {!r}; only alphanumeric " - "characters are valid in design names" - .format(name, invalid_char.group(0))) - - # This notice serves a dual purpose: to explain that the file is autogenerated, - # and to incorporate the nMigen version into generated code. - autogenerated = "Automatically generated by nMigen {}. Do not edit.".format(__version__) - - rtlil_text, self._name_map = rtlil.convert_fragment(fragment, name=name) - - def emit_rtlil(): - return rtlil_text - - def emit_verilog(opts=()): - return verilog._convert_rtlil_text(rtlil_text, - strip_internal_attrs=True, write_verilog_opts=opts) - - def emit_debug_verilog(opts=()): - return verilog._convert_rtlil_text(rtlil_text, - strip_internal_attrs=False, write_verilog_opts=opts) - - def emit_commands(syntax): - commands = [] - - for name in self.required_tools: - env_var = tool_env_var(name) - if syntax == "sh": - template = ": ${{{env_var}:={name}}}" - elif syntax == "bat": - template = \ - "if [%{env_var}%] equ [\"\"] set {env_var}=\n" \ - "if [%{env_var}%] equ [] set {env_var}={name}" - else: - assert False - commands.append(template.format(env_var=env_var, name=name)) - - for index, command_tpl in enumerate(self.command_templates): - command = render(command_tpl, origin="".format(index + 1), - syntax=syntax) - command = re.sub(r"\s+", " ", command) - if syntax == "sh": - commands.append(command) - elif syntax == "bat": - commands.append(command + " || exit /b") - else: - assert False - - return "\n".join(commands) - - def get_override(var): - var_env = 
"NMIGEN_{}".format(var) - if var_env in os.environ: - # On Windows, there is no way to define an "empty but set" variable; it is tempting - # to use a quoted empty string, but it doesn't do what one would expect. Recognize - # this as a useful pattern anyway, and treat `set VAR=""` on Windows the same way - # `export VAR=` is treated on Linux. - return re.sub(r'^\"\"$', "", os.environ[var_env]) - elif var in kwargs: - if isinstance(kwargs[var], str): - return textwrap.dedent(kwargs[var]).strip() - else: - return kwargs[var] - else: - return jinja2.Undefined(name=var) - - @jinja2.contextfunction - def invoke_tool(context, name): - env_var = tool_env_var(name) - if context.parent["syntax"] == "sh": - return "\"${}\"".format(env_var) - elif context.parent["syntax"] == "bat": - return "%{}%".format(env_var) - else: - assert False - - def options(opts): - if isinstance(opts, str): - return opts - else: - return " ".join(opts) - - def hierarchy(signal, separator): - return separator.join(self._name_map[signal][1:]) - - def ascii_escape(string): - def escape_one(match): - if match.group(1) is None: - return match.group(2) - else: - return "_{:02x}_".format(ord(match.group(1)[0])) - return "".join(escape_one(m) for m in re.finditer(r"([^A-Za-z0-9_])|(.)", string)) - - def tcl_escape(string): - return "{" + re.sub(r"([{}\\])", r"\\\1", string) + "}" - - def tcl_quote(string): - return '"' + re.sub(r"([$[\\])", r"\\\1", string) + '"' - - def verbose(arg): - if get_override("verbose"): - return arg - else: - return jinja2.Undefined(name="quiet") - - def quiet(arg): - if get_override("verbose"): - return jinja2.Undefined(name="quiet") - else: - return arg - - def render(source, origin, syntax=None): - try: - source = textwrap.dedent(source).strip() - compiled = jinja2.Template(source, - trim_blocks=True, lstrip_blocks=True, undefined=jinja2.StrictUndefined) - compiled.environment.filters["options"] = options - compiled.environment.filters["hierarchy"] = hierarchy - 
compiled.environment.filters["ascii_escape"] = ascii_escape - compiled.environment.filters["tcl_escape"] = tcl_escape - compiled.environment.filters["tcl_quote"] = tcl_quote - except jinja2.TemplateSyntaxError as e: - e.args = ("{} (at {}:{})".format(e.message, origin, e.lineno),) - raise - return compiled.render({ - "name": name, - "platform": self, - "emit_rtlil": emit_rtlil, - "emit_verilog": emit_verilog, - "emit_debug_verilog": emit_debug_verilog, - "emit_commands": emit_commands, - "syntax": syntax, - "invoke_tool": invoke_tool, - "get_override": get_override, - "verbose": verbose, - "quiet": quiet, - "autogenerated": autogenerated, - }) - - plan = BuildPlan(script="build_{}".format(name)) - for filename_tpl, content_tpl in self.file_templates.items(): - plan.add_file(render(filename_tpl, origin=filename_tpl), - render(content_tpl, origin=content_tpl)) - for filename, content in self.extra_files.items(): - plan.add_file(filename, content) - return plan +import warnings +warnings.warn("instead of nmigen.build.plat, use amaranth.build.plat", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/build/res.py b/nmigen/build/res.py index fde981f..531e68f 100644 --- a/nmigen/build/res.py +++ b/nmigen/build/res.py @@ -1,256 +1,7 @@ -from collections import OrderedDict - -from ..hdl.ast import * -from ..hdl.rec import * -from ..lib.io import * - -from .dsl import * +from amaranth.build.res import * +from amaranth.build.res import __all__ -__all__ = ["ResourceError", "ResourceManager"] - - -class ResourceError(Exception): - pass - - -class ResourceManager: - def __init__(self, resources, connectors): - self.resources = OrderedDict() - self._requested = OrderedDict() - self._phys_reqd = OrderedDict() - - self.connectors = OrderedDict() - self._conn_pins = OrderedDict() - - # Constraint lists - self._ports = [] - self._clocks = SignalDict() - - self.add_resources(resources) - self.add_connectors(connectors) - - def add_resources(self, resources): - for res in 
resources: - if not isinstance(res, Resource): - raise TypeError("Object {!r} is not a Resource".format(res)) - if (res.name, res.number) in self.resources: - raise NameError("Trying to add {!r}, but {!r} has the same name and number" - .format(res, self.resources[res.name, res.number])) - self.resources[res.name, res.number] = res - - def add_connectors(self, connectors): - for conn in connectors: - if not isinstance(conn, Connector): - raise TypeError("Object {!r} is not a Connector".format(conn)) - if (conn.name, conn.number) in self.connectors: - raise NameError("Trying to add {!r}, but {!r} has the same name and number" - .format(conn, self.connectors[conn.name, conn.number])) - self.connectors[conn.name, conn.number] = conn - - for conn_pin, plat_pin in conn: - assert conn_pin not in self._conn_pins - self._conn_pins[conn_pin] = plat_pin - - def lookup(self, name, number=0): - if (name, number) not in self.resources: - raise ResourceError("Resource {}#{} does not exist" - .format(name, number)) - return self.resources[name, number] - - def request(self, name, number=0, *, dir=None, xdr=None): - resource = self.lookup(name, number) - if (resource.name, resource.number) in self._requested: - raise ResourceError("Resource {}#{} has already been requested" - .format(name, number)) - - def merge_options(subsignal, dir, xdr): - if isinstance(subsignal.ios[0], Subsignal): - if dir is None: - dir = dict() - if xdr is None: - xdr = dict() - if not isinstance(dir, dict): - raise TypeError("Directions must be a dict, not {!r}, because {!r} " - "has subsignals" - .format(dir, subsignal)) - if not isinstance(xdr, dict): - raise TypeError("Data rate must be a dict, not {!r}, because {!r} " - "has subsignals" - .format(xdr, subsignal)) - for sub in subsignal.ios: - sub_dir = dir.get(sub.name, None) - sub_xdr = xdr.get(sub.name, None) - dir[sub.name], xdr[sub.name] = merge_options(sub, sub_dir, sub_xdr) - else: - if dir is None: - dir = subsignal.ios[0].dir - if xdr is None: 
- xdr = 0 - if dir not in ("i", "o", "oe", "io", "-"): - raise TypeError("Direction must be one of \"i\", \"o\", \"oe\", \"io\", " - "or \"-\", not {!r}" - .format(dir)) - if dir != subsignal.ios[0].dir and \ - not (subsignal.ios[0].dir == "io" or dir == "-"): - raise ValueError("Direction of {!r} cannot be changed from \"{}\" to \"{}\"; " - "direction can be changed from \"io\" to \"i\", \"o\", or " - "\"oe\", or from anything to \"-\"" - .format(subsignal.ios[0], subsignal.ios[0].dir, dir)) - if not isinstance(xdr, int) or xdr < 0: - raise ValueError("Data rate of {!r} must be a non-negative integer, not {!r}" - .format(subsignal.ios[0], xdr)) - return dir, xdr - - def resolve(resource, dir, xdr, name, attrs): - for attr_key, attr_value in attrs.items(): - if hasattr(attr_value, "__call__"): - attr_value = attr_value(self) - assert attr_value is None or isinstance(attr_value, str) - if attr_value is None: - del attrs[attr_key] - else: - attrs[attr_key] = attr_value - - if isinstance(resource.ios[0], Subsignal): - fields = OrderedDict() - for sub in resource.ios: - fields[sub.name] = resolve(sub, dir[sub.name], xdr[sub.name], - name="{}__{}".format(name, sub.name), - attrs={**attrs, **sub.attrs}) - return Record([ - (f_name, f.layout) for (f_name, f) in fields.items() - ], fields=fields, name=name) - - elif isinstance(resource.ios[0], (Pins, DiffPairs)): - phys = resource.ios[0] - if isinstance(phys, Pins): - phys_names = phys.names - port = Record([("io", len(phys))], name=name) - if isinstance(phys, DiffPairs): - phys_names = [] - record_fields = [] - if not self.should_skip_port_component(None, attrs, "p"): - phys_names += phys.p.names - record_fields.append(("p", len(phys))) - if not self.should_skip_port_component(None, attrs, "n"): - phys_names += phys.n.names - record_fields.append(("n", len(phys))) - port = Record(record_fields, name=name) - if dir == "-": - pin = None - else: - pin = Pin(len(phys), dir, xdr=xdr, name=name) - - for phys_name in phys_names: 
- if phys_name in self._phys_reqd: - raise ResourceError("Resource component {} uses physical pin {}, but it " - "is already used by resource component {} that was " - "requested earlier" - .format(name, phys_name, self._phys_reqd[phys_name])) - self._phys_reqd[phys_name] = name - - self._ports.append((resource, pin, port, attrs)) - - if pin is not None and resource.clock is not None: - self.add_clock_constraint(pin.i, resource.clock.frequency) - - return pin if pin is not None else port - - else: - assert False # :nocov: - - value = resolve(resource, - *merge_options(resource, dir, xdr), - name="{}_{}".format(resource.name, resource.number), - attrs=resource.attrs) - self._requested[resource.name, resource.number] = value - return value - - def iter_single_ended_pins(self): - for res, pin, port, attrs in self._ports: - if pin is None: - continue - if isinstance(res.ios[0], Pins): - yield pin, port, attrs, res.ios[0].invert - - def iter_differential_pins(self): - for res, pin, port, attrs in self._ports: - if pin is None: - continue - if isinstance(res.ios[0], DiffPairs): - yield pin, port, attrs, res.ios[0].invert - - def should_skip_port_component(self, port, attrs, component): - return False - - def iter_ports(self): - for res, pin, port, attrs in self._ports: - if isinstance(res.ios[0], Pins): - if not self.should_skip_port_component(port, attrs, "io"): - yield port.io - elif isinstance(res.ios[0], DiffPairs): - if not self.should_skip_port_component(port, attrs, "p"): - yield port.p - if not self.should_skip_port_component(port, attrs, "n"): - yield port.n - else: - assert False - - def iter_port_constraints(self): - for res, pin, port, attrs in self._ports: - if isinstance(res.ios[0], Pins): - if not self.should_skip_port_component(port, attrs, "io"): - yield port.io.name, res.ios[0].map_names(self._conn_pins, res), attrs - elif isinstance(res.ios[0], DiffPairs): - if not self.should_skip_port_component(port, attrs, "p"): - yield port.p.name, 
res.ios[0].p.map_names(self._conn_pins, res), attrs - if not self.should_skip_port_component(port, attrs, "n"): - yield port.n.name, res.ios[0].n.map_names(self._conn_pins, res), attrs - else: - assert False - - def iter_port_constraints_bits(self): - for port_name, pin_names, attrs in self.iter_port_constraints(): - if len(pin_names) == 1: - yield port_name, pin_names[0], attrs - else: - for bit, pin_name in enumerate(pin_names): - yield "{}[{}]".format(port_name, bit), pin_name, attrs - - def add_clock_constraint(self, clock, frequency): - if not isinstance(clock, Signal): - raise TypeError("Object {!r} is not a Signal".format(clock)) - if not isinstance(frequency, (int, float)): - raise TypeError("Frequency must be a number, not {!r}".format(frequency)) - - if clock in self._clocks: - raise ValueError("Cannot add clock constraint on {!r}, which is already constrained " - "to {} Hz" - .format(clock, self._clocks[clock])) - else: - self._clocks[clock] = float(frequency) - - def iter_clock_constraints(self): - # Back-propagate constraints through the input buffer. For clock constraints on pins - # (the majority of cases), toolchains work better if the constraint is defined on the pin - # and not on the buffered internal net; and if the toolchain is advanced enough that - # it considers clock phase and delay of the input buffer, it is *necessary* to define - # the constraint on the pin to match the designer's expectation of phase being referenced - # to the pin. - # - # Constraints on nets with no corresponding input pin (e.g. PLL or SERDES outputs) are not - # affected. 
- pin_i_to_port = SignalDict() - for res, pin, port, attrs in self._ports: - if hasattr(pin, "i"): - if isinstance(res.ios[0], Pins): - pin_i_to_port[pin.i] = port.io - elif isinstance(res.ios[0], DiffPairs): - pin_i_to_port[pin.i] = port.p - else: - assert False - - for net_signal, frequency in self._clocks.items(): - port_signal = pin_i_to_port.get(net_signal) - yield net_signal, port_signal, frequency +import warnings +warnings.warn("instead of nmigen.build.res, use amaranth.build.res", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/build/run.py b/nmigen/build/run.py index 507237a..03382fb 100644 --- a/nmigen/build/run.py +++ b/nmigen/build/run.py @@ -1,268 +1,7 @@ -from collections import OrderedDict -from contextlib import contextmanager -from abc import ABCMeta, abstractmethod -import os -import sys -import subprocess -import tempfile -import zipfile -import hashlib -import pathlib +from amaranth.build.run import * +from amaranth.build.run import __all__ -__all__ = ["BuildPlan", "BuildProducts", "LocalBuildProducts", "RemoteSSHBuildProducts"] - - - -class BuildPlan: - def __init__(self, script): - """A build plan. - - Parameters - ---------- - script : str - The base name (without extension) of the script that will be executed. - """ - self.script = script - self.files = OrderedDict() - - def add_file(self, filename, content): - """ - Add ``content``, which can be a :class:`str`` or :class:`bytes`, to the build plan - as ``filename``. The file name can be a relative path with directories separated by - forward slashes (``/``). - """ - assert isinstance(filename, str) and filename not in self.files - self.files[filename] = content - - def digest(self, size=64): - """ - Compute a `digest`, a short byte sequence deterministically and uniquely identifying - this build plan. 
- """ - hasher = hashlib.blake2b(digest_size=size) - for filename in sorted(self.files): - hasher.update(filename.encode("utf-8")) - content = self.files[filename] - if isinstance(content, str): - content = content.encode("utf-8") - hasher.update(content) - hasher.update(self.script.encode("utf-8")) - return hasher.digest() - - def archive(self, file): - """ - Archive files from the build plan into ``file``, which can be either a filename, or - a file-like object. The produced archive is deterministic: exact same files will - always produce exact same archive. - """ - with zipfile.ZipFile(file, "w") as archive: - # Write archive members in deterministic order and with deterministic timestamp. - for filename in sorted(self.files): - archive.writestr(zipfile.ZipInfo(filename), self.files[filename]) - - def execute_local(self, root="build", *, run_script=True): - """ - Execute build plan using the local strategy. Files from the build plan are placed in - the build root directory ``root``, and, if ``run_script`` is ``True``, the script - appropriate for the platform (``{script}.bat`` on Windows, ``{script}.sh`` elsewhere) is - executed in the build root. - - Returns :class:`LocalBuildProducts`. - """ - os.makedirs(root, exist_ok=True) - cwd = os.getcwd() - try: - os.chdir(root) - - for filename, content in self.files.items(): - filename = pathlib.Path(filename) - # Forbid parent directory components completely to avoid the possibility - # of writing outside the build root. - assert ".." not in filename.parts - dirname = os.path.dirname(filename) - if dirname: - os.makedirs(dirname, exist_ok=True) - - mode = "wt" if isinstance(content, str) else "wb" - with open(filename, mode) as f: - f.write(content) - - if run_script: - if sys.platform.startswith("win32"): - # Without "call", "cmd /c {}.bat" will return 0. - # See https://stackoverflow.com/a/30736987 for a detailed explanation of why. - # Running the script manually from a command prompt is unaffected. 
- subprocess.check_call(["cmd", "/c", "call {}.bat".format(self.script)]) - else: - subprocess.check_call(["sh", "{}.sh".format(self.script)]) - - return LocalBuildProducts(os.getcwd()) - - finally: - os.chdir(cwd) - - def execute_remote_ssh(self, *, connect_to = {}, root, run_script=True): - """ - Execute build plan using the remote SSH strategy. Files from the build - plan are transferred via SFTP to the directory ``root`` on a remote - server. If ``run_script`` is ``True``, the ``paramiko`` SSH client will - then run ``{script}.sh``. ``root`` can either be an absolute or - relative (to the login directory) path. - - ``connect_to`` is a dictionary that holds all input arguments to - ``paramiko``'s ``SSHClient.connect`` - (`documentation `_). - At a minimum, the ``hostname`` input argument must be supplied in this - dictionary as the remote server. - - Returns :class:`RemoteSSHBuildProducts`. - """ - from paramiko import SSHClient - - with SSHClient() as client: - client.load_system_host_keys() - client.connect(**connect_to) - - with client.open_sftp() as sftp: - def mkdir_exist_ok(path): - try: - sftp.mkdir(str(path)) - except IOError as e: - # mkdir fails if directory exists. This is fine in nmigen.build. - # Reraise errors containing e.errno info. - if e.errno: - raise e - - def mkdirs(path): - # Iteratively create parent directories of a file by iterating over all - # parents except for the root ("."). Slicing the parents results in - # TypeError, so skip over the root ("."); this also handles files - # already in the root directory. - for parent in reversed(path.parents): - if parent == pathlib.PurePosixPath("."): - continue - else: - mkdir_exist_ok(parent) - - mkdir_exist_ok(root) - - sftp.chdir(root) - for filename, content in self.files.items(): - filename = pathlib.PurePosixPath(filename) - assert ".." 
not in filename.parts - - mkdirs(filename) - - mode = "wt" if isinstance(content, str) else "wb" - with sftp.file(str(filename), mode) as f: - # "b/t" modifier ignored in SFTP. - if mode == "wt": - f.write(content.encode("utf-8")) - else: - f.write(content) - - if run_script: - transport = client.get_transport() - channel = transport.open_session() - channel.set_combine_stderr(True) - - cmd = "if [ -f ~/.profile ]; then . ~/.profile; fi && cd {} && sh {}.sh".format(root, self.script) - channel.exec_command(cmd) - - # Show the output from the server while products are built. - buf = channel.recv(1024) - while buf: - print(buf.decode("utf-8"), end="") - buf = channel.recv(1024) - - return RemoteSSHBuildProducts(connect_to, root) - - def execute(self): - """ - Execute build plan using the default strategy. Use one of the ``execute_*`` methods - explicitly to have more control over the strategy. - """ - return self.execute_local() - - -class BuildProducts(metaclass=ABCMeta): - @abstractmethod - def get(self, filename, mode="b"): - """ - Extract ``filename`` from build products, and return it as a :class:`bytes` (if ``mode`` - is ``"b"``) or a :class:`str` (if ``mode`` is ``"t"``). - """ - assert mode in ("b", "t") - - @contextmanager - def extract(self, *filenames): - """ - Extract ``filenames`` from build products, place them in an OS-specific temporary file - location, with the extension preserved, and delete them afterwards. This method is used - as a context manager, e.g.: :: - - with products.extract("bitstream.bin", "programmer.cfg") \ - as bitstream_filename, config_filename: - subprocess.check_call(["program", "-c", config_filename, bitstream_filename]) - """ - files = [] - try: - for filename in filenames: - # On Windows, a named temporary file (as created by Python) is not accessible to - # others if it's still open within the Python process, so we close it and delete - # it manually. 
- file = tempfile.NamedTemporaryFile( - prefix="nmigen_", suffix="_" + os.path.basename(filename), - delete=False) - files.append(file) - file.write(self.get(filename)) - file.close() - - if len(files) == 0: - return (yield) - elif len(files) == 1: - return (yield files[0].name) - else: - return (yield [file.name for file in files]) - finally: - for file in files: - os.unlink(file.name) - - -class LocalBuildProducts(BuildProducts): - def __init__(self, root): - # We provide no guarantees that files will be available on the local filesystem (i.e. in - # any way other than through `products.get()`) in general, so downstream code must never - # rely on this, even when we happen to use a local build most of the time. - self.__root = root - - def get(self, filename, mode="b"): - super().get(filename, mode) - with open(os.path.join(self.__root, filename), "r" + mode) as f: - return f.read() - - -class RemoteSSHBuildProducts(BuildProducts): - def __init__(self, connect_to, root): - self.__connect_to = connect_to - self.__root = root - - def get(self, filename, mode="b"): - super().get(filename, mode) - - from paramiko import SSHClient - - with SSHClient() as client: - client.load_system_host_keys() - client.connect(**self.__connect_to) - - with client.open_sftp() as sftp: - sftp.chdir(self.__root) - - with sftp.file(filename, "r" + mode) as f: - # "b/t" modifier ignored in SFTP. 
- if mode == "t": - return f.read().decode("utf-8") - else: - return f.read() +import warnings +warnings.warn("instead of nmigen.build.run, use amaranth.build.run", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/cli.py b/nmigen/cli.py index 51e9a95..57bc0ba 100644 --- a/nmigen/cli.py +++ b/nmigen/cli.py @@ -1,78 +1,7 @@ -import argparse - -from .hdl.ir import Fragment -from .back import rtlil, cxxrtl, verilog -from .sim import Simulator +from amaranth.cli import * +from amaranth.cli import __all__ -__all__ = ["main"] - - -def main_parser(parser=None): - if parser is None: - parser = argparse.ArgumentParser() - - p_action = parser.add_subparsers(dest="action") - - p_generate = p_action.add_parser("generate", - help="generate RTLIL, Verilog or CXXRTL from the design") - p_generate.add_argument("-t", "--type", dest="generate_type", - metavar="LANGUAGE", choices=["il", "cc", "v"], - help="generate LANGUAGE (il for RTLIL, v for Verilog, cc for CXXRTL; default: file extension of FILE, if given)") - p_generate.add_argument("generate_file", - metavar="FILE", type=argparse.FileType("w"), nargs="?", - help="write generated code to FILE") - - p_simulate = p_action.add_parser( - "simulate", help="simulate the design") - p_simulate.add_argument("-v", "--vcd-file", - metavar="VCD-FILE", type=argparse.FileType("w"), - help="write execution trace to VCD-FILE") - p_simulate.add_argument("-w", "--gtkw-file", - metavar="GTKW-FILE", type=argparse.FileType("w"), - help="write GTKWave configuration to GTKW-FILE") - p_simulate.add_argument("-p", "--period", dest="sync_period", - metavar="TIME", type=float, default=1e-6, - help="set 'sync' clock domain period to TIME (default: %(default)s)") - p_simulate.add_argument("-c", "--clocks", dest="sync_clocks", - metavar="COUNT", type=int, required=True, - help="simulate for COUNT 'sync' clock periods") - - return parser - - -def main_runner(parser, args, design, platform=None, name="top", ports=()): - if args.action == "generate": - 
fragment = Fragment.get(design, platform) - generate_type = args.generate_type - if generate_type is None and args.generate_file: - if args.generate_file.name.endswith(".il"): - generate_type = "il" - if args.generate_file.name.endswith(".cc"): - generate_type = "cc" - if args.generate_file.name.endswith(".v"): - generate_type = "v" - if generate_type is None: - parser.error("Unable to auto-detect language, specify explicitly with -t/--type") - if generate_type == "il": - output = rtlil.convert(fragment, name=name, ports=ports) - if generate_type == "cc": - output = cxxrtl.convert(fragment, name=name, ports=ports) - if generate_type == "v": - output = verilog.convert(fragment, name=name, ports=ports) - if args.generate_file: - args.generate_file.write(output) - else: - print(output) - - if args.action == "simulate": - fragment = Fragment.get(design, platform) - sim = Simulator(fragment) - sim.add_clock(args.sync_period) - with sim.write_vcd(vcd_file=args.vcd_file, gtkw_file=args.gtkw_file, traces=ports): - sim.run_until(args.sync_period * args.sync_clocks, run_passive=True) - - -def main(*args, **kwargs): - parser = main_parser() - main_runner(parser, parser.parse_args(), *args, **kwargs) +import warnings +warnings.warn("instead of nmigen.cli, use amaranth.cli", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/__init__.py b/nmigen/compat/__init__.py index bdf1313..648d245 100644 --- a/nmigen/compat/__init__.py +++ b/nmigen/compat/__init__.py @@ -1,11 +1,6 @@ -from .fhdl.structure import * -from .fhdl.module import * -from .fhdl.specials import * -from .fhdl.bitcontainer import * -from .fhdl.decorators import * -# from .fhdl.simplify import * +from amaranth.compat import * -from .sim import * -from .genlib.record import * -from .genlib.fsm import * +import warnings +warnings.warn("instead of nmigen.compat, use amaranth.compat", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/fhdl/__init__.py b/nmigen/compat/fhdl/__init__.py index 
e69de29..c7a2acc 100644 --- a/nmigen/compat/fhdl/__init__.py +++ b/nmigen/compat/fhdl/__init__.py @@ -0,0 +1,6 @@ +from amaranth.compat.fhdl import * + + +import warnings +warnings.warn("instead of nmigen.compat.fhdl, use amaranth.compat.fhdl", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/fhdl/bitcontainer.py b/nmigen/compat/fhdl/bitcontainer.py index 4b91572..1e38f69 100644 --- a/nmigen/compat/fhdl/bitcontainer.py +++ b/nmigen/compat/fhdl/bitcontainer.py @@ -1,21 +1,7 @@ -from ... import utils -from ...hdl import ast -from ..._utils import deprecated +from amaranth.compat.fhdl.bitcontainer import * +from amaranth.compat.fhdl.bitcontainer import __all__ -__all__ = ["log2_int", "bits_for", "value_bits_sign"] - - -@deprecated("instead of `log2_int`, use `nmigen.utils.log2_int`") -def log2_int(n, need_pow2=True): - return utils.log2_int(n, need_pow2) - - -@deprecated("instead of `bits_for`, use `nmigen.utils.bits_for`") -def bits_for(n, require_sign_bit=False): - return utils.bits_for(n, require_sign_bit) - - -@deprecated("instead of `value_bits_sign(v)`, use `v.shape()`") -def value_bits_sign(v): - return tuple(ast.Value.cast(v).shape()) +import warnings +warnings.warn("instead of nmigen.compat.fhdl.bitcontainer, use amaranth.compat.fhdl.bitcontainer", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/fhdl/conv_output.py b/nmigen/compat/fhdl/conv_output.py index 793fad2..6d93d66 100644 --- a/nmigen/compat/fhdl/conv_output.py +++ b/nmigen/compat/fhdl/conv_output.py @@ -1,35 +1,6 @@ -from operator import itemgetter +from amaranth.compat.fhdl.conv_output import * -class ConvOutput: - def __init__(self): - self.main_source = "" - self.data_files = dict() - - def set_main_source(self, src): - self.main_source = src - - def add_data_file(self, filename_base, content): - filename = filename_base - i = 1 - while filename in self.data_files: - parts = filename_base.split(".", maxsplit=1) - parts[0] += "_" + str(i) - filename = ".".join(parts) - 
i += 1 - self.data_files[filename] = content - return filename - - def __str__(self): - r = self.main_source + "\n" - for filename, content in sorted(self.data_files.items(), - key=itemgetter(0)): - r += filename + ":\n" + content - return r - - def write(self, main_filename): - with open(main_filename, "w") as f: - f.write(self.main_source) - for filename, content in self.data_files.items(): - with open(filename, "w") as f: - f.write(content) +import warnings +warnings.warn("instead of nmigen.compat.fhdl.conv_output, use amaranth.compat.fhdl.conv_output", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/fhdl/decorators.py b/nmigen/compat/fhdl/decorators.py index e61ccb5..a285588 100644 --- a/nmigen/compat/fhdl/decorators.py +++ b/nmigen/compat/fhdl/decorators.py @@ -1,55 +1,7 @@ -from ...hdl.ast import * -from ...hdl.xfrm import ResetInserter as NativeResetInserter -from ...hdl.xfrm import EnableInserter as NativeEnableInserter -from ...hdl.xfrm import DomainRenamer as NativeDomainRenamer -from ..._utils import deprecated +from amaranth.compat.fhdl.decorators import * +from amaranth.compat.fhdl.decorators import __all__ -__all__ = ["ResetInserter", "CEInserter", "ClockDomainsRenamer"] - - -class _CompatControlInserter: - _control_name = None - _native_inserter = None - - def __init__(self, clock_domains=None): - self.clock_domains = clock_domains - - def __call__(self, module): - if self.clock_domains is None: - signals = {self._control_name: ("sync", Signal(name=self._control_name))} - else: - def name(cd): - return self._control_name + "_" + cd - signals = {name(cd): (cd, Signal(name=name(cd))) for cd in self.clock_domains} - for name, (cd, signal) in signals.items(): - setattr(module, name, signal) - return self._native_inserter(dict(signals.values()))(module) - - -@deprecated("instead of `migen.fhdl.decorators.ResetInserter`, " - "use `nmigen.hdl.xfrm.ResetInserter`; note that nMigen ResetInserter accepts " - "a dict of reset signals (or a single 
reset signal) as an argument, not " - "a set of clock domain names (or a single clock domain name)") -class CompatResetInserter(_CompatControlInserter): - _control_name = "reset" - _native_inserter = NativeResetInserter - - -@deprecated("instead of `migen.fhdl.decorators.CEInserter`, " - "use `nmigen.hdl.xfrm.EnableInserter`; note that nMigen EnableInserter accepts " - "a dict of enable signals (or a single enable signal) as an argument, not " - "a set of clock domain names (or a single clock domain name)") -class CompatCEInserter(_CompatControlInserter): - _control_name = "ce" - _native_inserter = NativeEnableInserter - - -class CompatClockDomainsRenamer(NativeDomainRenamer): - def __init__(self, cd_remapping): - super().__init__(cd_remapping) - - -ResetInserter = CompatResetInserter -CEInserter = CompatCEInserter -ClockDomainsRenamer = CompatClockDomainsRenamer +import warnings +warnings.warn("instead of nmigen.compat.fhdl.decorators, use amaranth.compat.fhdl.decorators", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/fhdl/module.py b/nmigen/compat/fhdl/module.py index e5b7796..c6df57c 100644 --- a/nmigen/compat/fhdl/module.py +++ b/nmigen/compat/fhdl/module.py @@ -1,163 +1,7 @@ -from collections.abc import Iterable - -from ..._utils import flatten, deprecated -from ...hdl import dsl, ir +from amaranth.compat.fhdl.module import * +from amaranth.compat.fhdl.module import __all__ -__all__ = ["Module", "FinalizeError"] - - -def _flat_list(e): - if isinstance(e, Iterable): - return list(flatten(e)) - else: - return [e] - - -class CompatFinalizeError(Exception): - pass - - -FinalizeError = CompatFinalizeError - - -class _CompatModuleProxy: - def __init__(self, cm): - object.__setattr__(self, "_cm", cm) - - -class _CompatModuleComb(_CompatModuleProxy): - @deprecated("instead of `self.comb +=`, use `m.d.comb +=`") - def __iadd__(self, assigns): - self._cm._module._add_statement(assigns, domain=None, depth=0, compat_mode=True) - return self - - -class 
_CompatModuleSyncCD: - def __init__(self, cm, cd): - self._cm = cm - self._cd = cd - - @deprecated("instead of `self.sync. +=`, use `m.d. +=`") - def __iadd__(self, assigns): - self._cm._module._add_statement(assigns, domain=self._cd, depth=0, compat_mode=True) - return self - - -class _CompatModuleSync(_CompatModuleProxy): - @deprecated("instead of `self.sync +=`, use `m.d.sync +=`") - def __iadd__(self, assigns): - self._cm._module._add_statement(assigns, domain="sync", depth=0, compat_mode=True) - return self - - def __getattr__(self, name): - return _CompatModuleSyncCD(self._cm, name) - - def __setattr__(self, name, value): - if not isinstance(value, _CompatModuleSyncCD): - raise AttributeError("Attempted to assign sync property - use += instead") - - -class _CompatModuleSpecials(_CompatModuleProxy): - @deprecated("instead of `self.specials. =`, use `m.submodules. =`") - def __setattr__(self, name, value): - self._cm._submodules.append((name, value)) - setattr(self._cm, name, value) - - @deprecated("instead of `self.specials +=`, use `m.submodules +=`") - def __iadd__(self, other): - self._cm._submodules += [(None, e) for e in _flat_list(other)] - return self - - -class _CompatModuleSubmodules(_CompatModuleProxy): - @deprecated("instead of `self.submodules. =`, use `m.submodules. =`") - def __setattr__(self, name, value): - self._cm._submodules.append((name, value)) - setattr(self._cm, name, value) - - @deprecated("instead of `self.submodules +=`, use `m.submodules +=`") - def __iadd__(self, other): - self._cm._submodules += [(None, e) for e in _flat_list(other)] - return self - - -class _CompatModuleClockDomains(_CompatModuleProxy): - @deprecated("instead of `self.clock_domains. =`, use `m.domains. 
=`") - def __setattr__(self, name, value): - self.__iadd__(value) - setattr(self._cm, name, value) - - @deprecated("instead of `self.clock_domains +=`, use `m.domains +=`") - def __iadd__(self, other): - self._cm._module.domains += _flat_list(other) - return self - - -class CompatModule(ir.Elaboratable): - _MustUse__silence = True - - # Actually returns another nMigen Elaboratable (nmigen.dsl.Module), not a Fragment. - def get_fragment(self): - assert not self.get_fragment_called - self.get_fragment_called = True - self.finalize() - return self._module - - def elaborate(self, platform): - if not self.get_fragment_called: - self.get_fragment() - return self._module - - def __getattr__(self, name): - if name == "comb": - return _CompatModuleComb(self) - elif name == "sync": - return _CompatModuleSync(self) - elif name == "specials": - return _CompatModuleSpecials(self) - elif name == "submodules": - return _CompatModuleSubmodules(self) - elif name == "clock_domains": - return _CompatModuleClockDomains(self) - elif name == "finalized": - self.finalized = False - return self.finalized - elif name == "_module": - self._module = dsl.Module() - return self._module - elif name == "_submodules": - self._submodules = [] - return self._submodules - elif name == "_clock_domains": - self._clock_domains = [] - return self._clock_domains - elif name == "get_fragment_called": - self.get_fragment_called = False - return self.get_fragment_called - else: - raise AttributeError("'{}' object has no attribute '{}'" - .format(type(self).__name__, name)) - - def finalize(self, *args, **kwargs): - def finalize_submodules(): - for name, submodule in self._submodules: - if not hasattr(submodule, "finalize"): - continue - if submodule.finalized: - continue - submodule.finalize(*args, **kwargs) - - if not self.finalized: - self.finalized = True - finalize_submodules() - self.do_finalize(*args, **kwargs) - finalize_submodules() - for name, submodule in self._submodules: - 
self._module._add_submodule(submodule, name) - - def do_finalize(self): - pass - - -Module = CompatModule +import warnings +warnings.warn("instead of nmigen.compat.fhdl.module, use amaranth.compat.fhdl.module", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/fhdl/specials.py b/nmigen/compat/fhdl/specials.py index 901fc67..5c1e082 100644 --- a/nmigen/compat/fhdl/specials.py +++ b/nmigen/compat/fhdl/specials.py @@ -1,145 +1,7 @@ +from amaranth.compat.fhdl.specials import * +from amaranth.compat.fhdl.specials import __all__ + + import warnings - -from ..._utils import deprecated, extend -from ...hdl.ast import * -from ...hdl.ir import Elaboratable -from ...hdl.mem import Memory as NativeMemory -from ...hdl.ir import Fragment, Instance -from ...hdl.dsl import Module -from .module import Module as CompatModule -from .structure import Signal -from ...lib.io import Pin - - -__all__ = ["TSTriple", "Instance", "Memory", "READ_FIRST", "WRITE_FIRST", "NO_CHANGE"] - - -class TSTriple: - def __init__(self, bits_sign=None, min=None, max=None, reset_o=0, reset_oe=0, reset_i=0, - name=None): - self.o = Signal(bits_sign, min=min, max=max, reset=reset_o, - name=None if name is None else name + "_o") - self.oe = Signal(reset=reset_oe, - name=None if name is None else name + "_oe") - self.i = Signal(bits_sign, min=min, max=max, reset=reset_i, - name=None if name is None else name + "_i") - - def __len__(self): - return len(self.o) - - def get_tristate(self, io): - return Tristate(io, self.o, self.oe, self.i) - - -class Tristate(Elaboratable): - def __init__(self, target, o, oe, i=None): - self.target = target - self.o = o - self.oe = oe - self.i = i if i is not None else None - - def elaborate(self, platform): - if self.i is None: - pin = Pin(len(self.target), dir="oe") - pin.o = self.o - pin.oe = self.oe - return platform.get_tristate(pin, self.target, attrs={}, invert=None) - else: - pin = Pin(len(self.target), dir="io") - pin.o = self.o - pin.oe = self.oe - pin.i = 
self.i - return platform.get_input_output(pin, self.target, attrs={}, invert=None) - - m = Module() - if self.i is not None: - m.d.comb += self.i.eq(self.target) - m.submodules += Instance("$tribuf", - p_WIDTH=len(self.target), - i_EN=self.oe, - i_A=self.o, - o_Y=self.target, - ) - - f = m.elaborate(platform) - f.flatten = True - return f - - -(READ_FIRST, WRITE_FIRST, NO_CHANGE) = range(3) - - -class _MemoryPort(CompatModule): - def __init__(self, adr, dat_r, we=None, dat_w=None, async_read=False, re=None, - we_granularity=0, mode=WRITE_FIRST, clock_domain="sync"): - self.adr = adr - self.dat_r = dat_r - self.we = we - self.dat_w = dat_w - self.async_read = async_read - self.re = re - self.we_granularity = we_granularity - self.mode = mode - self.clock = ClockSignal(clock_domain) - - -@extend(NativeMemory) -@deprecated("it is not necessary or permitted to add Memory as a special or submodule") -def elaborate(self, platform): - return Fragment() - - -class CompatMemory(NativeMemory, Elaboratable): - def __init__(self, width, depth, init=None, name=None): - super().__init__(width=width, depth=depth, init=init, name=name) - - @deprecated("instead of `get_port()`, use `read_port()` and `write_port()`") - def get_port(self, write_capable=False, async_read=False, has_re=False, we_granularity=0, - mode=WRITE_FIRST, clock_domain="sync"): - if we_granularity >= self.width: - warnings.warn("do not specify `we_granularity` greater than memory width, as it " - "is a hard error in non-compatibility mode", - DeprecationWarning, stacklevel=1) - we_granularity = 0 - if we_granularity == 0: - warnings.warn("instead of `we_granularity=0`, use `we_granularity=None` or avoid " - "specifying it at all, as it is a hard error in non-compatibility mode", - DeprecationWarning, stacklevel=1) - we_granularity = None - assert mode != NO_CHANGE - rdport = self.read_port(domain="comb" if async_read else clock_domain, - transparent=mode == WRITE_FIRST) - rdport.addr.name = 
"{}_addr".format(self.name) - adr = rdport.addr - dat_r = rdport.data - if write_capable: - wrport = self.write_port(domain=clock_domain, granularity=we_granularity) - wrport.addr = rdport.addr - we = wrport.en - dat_w = wrport.data - else: - we = None - dat_w = None - if has_re: - if mode == READ_FIRST: - re = rdport.en - else: - warnings.warn("the combination of `has_re=True` and `mode=WRITE_FIRST` has " - "surprising behavior: keeping `re` low would merely latch " - "the address, while the data will change with changing memory " - "contents; avoid using `re` with transparent ports as it is a hard " - "error in non-compatibility mode", - DeprecationWarning, stacklevel=1) - re = Signal() - else: - re = None - mp = _MemoryPort(adr, dat_r, we, dat_w, - async_read, re, we_granularity, mode, - clock_domain) - mp.submodules.rdport = rdport - if write_capable: - mp.submodules.wrport = wrport - return mp - - -Memory = CompatMemory +warnings.warn("instead of nmigen.compat.fhdl.specials, use amaranth.compat.fhdl.specials", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/fhdl/structure.py b/nmigen/compat/fhdl/structure.py index d450e45..eae4a3f 100644 --- a/nmigen/compat/fhdl/structure.py +++ b/nmigen/compat/fhdl/structure.py @@ -1,185 +1,7 @@ -import builtins +from amaranth.compat.fhdl.structure import * +from amaranth.compat.fhdl.structure import __all__ + + import warnings -from collections import OrderedDict - -from ...utils import bits_for -from ..._utils import deprecated, extend -from ...hdl import ast -from ...hdl.ast import (DUID, - Shape, signed, unsigned, - Value, Const, C, Mux, Slice as _Slice, Part, Cat, Repl, - Signal as NativeSignal, - ClockSignal, ResetSignal, - Array, ArrayProxy as _ArrayProxy) -from ...hdl.cd import ClockDomain - - -__all__ = ["DUID", "wrap", "Mux", "Cat", "Replicate", "Constant", "C", "Signal", "ClockSignal", - "ResetSignal", "If", "Case", "Array", "ClockDomain"] - - -@deprecated("instead of `wrap`, use `Value.cast`") -def 
wrap(v): - return Value.cast(v) - - -class CompatSignal(NativeSignal): - def __init__(self, bits_sign=None, name=None, variable=False, reset=0, - reset_less=False, name_override=None, min=None, max=None, - related=None, attr=None, src_loc_at=0, **kwargs): - if min is not None or max is not None: - warnings.warn("instead of `Signal(min={min}, max={max})`, " - "use `Signal(range({min}, {max}))`" - .format(min=min or 0, max=max or 2), - DeprecationWarning, stacklevel=2 + src_loc_at) - - if bits_sign is None: - if min is None: - min = 0 - if max is None: - max = 2 - max -= 1 # make both bounds inclusive - if min > max: - raise ValueError("Lower bound {} should be less or equal to higher bound {}" - .format(min, max + 1)) - sign = min < 0 or max < 0 - if min == max: - bits = 0 - else: - bits = builtins.max(bits_for(min, sign), bits_for(max, sign)) - shape = signed(bits) if sign else unsigned(bits) - else: - if not (min is None and max is None): - raise ValueError("Only one of bits/signedness or bounds may be specified") - shape = bits_sign - - super().__init__(shape=shape, name=name_override or name, - reset=reset, reset_less=reset_less, - attrs=attr, src_loc_at=1 + src_loc_at, **kwargs) - - -Signal = CompatSignal - - -@deprecated("instead of `Constant`, use `Const`") -def Constant(value, bits_sign=None): - return Const(value, bits_sign) - - -@deprecated("instead of `Replicate`, use `Repl`") -def Replicate(v, n): - return Repl(v, n) - - -@extend(Const) -@property -@deprecated("instead of `.nbits`, use `.width`") -def nbits(self): - return self.width - - -@extend(NativeSignal) -@property -@deprecated("instead of `.nbits`, use `.width`") -def nbits(self): - return self.width - - -@extend(NativeSignal) -@NativeSignal.nbits.setter -@deprecated("instead of `.nbits = x`, use `.width = x`") -def nbits(self, value): - self.width = value - - -@extend(NativeSignal) -@deprecated("instead of `.part`, use `.bit_select`") -def part(self, offset, width): - return Part(self, offset, 
width, src_loc_at=2) - - -@extend(Cat) -@property -@deprecated("instead of `.l`, use `.parts`") -def l(self): - return self.parts - - -@extend(ast.Operator) -@property -@deprecated("instead of `.op`, use `.operator`") -def op(self): - return self.operator - - -@extend(_ArrayProxy) -@property -@deprecated("instead `_ArrayProxy.choices`, use `ArrayProxy.elems`") -def choices(self): - return self.elems - - -class If(ast.Switch): - @deprecated("instead of `If(cond, ...)`, use `with m.If(cond): ...`") - def __init__(self, cond, *stmts): - cond = Value.cast(cond) - if len(cond) != 1: - cond = cond.bool() - super().__init__(cond, {("1",): ast.Statement.cast(stmts)}) - - @deprecated("instead of `.Elif(cond, ...)`, use `with m.Elif(cond): ...`") - def Elif(self, cond, *stmts): - cond = Value.cast(cond) - if len(cond) != 1: - cond = cond.bool() - self.cases = OrderedDict((("-" + k,), v) for (k,), v in self.cases.items()) - self.cases[("1" + "-" * len(self.test),)] = ast.Statement.cast(stmts) - self.test = Cat(self.test, cond) - return self - - @deprecated("instead of `.Else(...)`, use `with m.Else(): ...`") - def Else(self, *stmts): - self.cases[()] = ast.Statement.cast(stmts) - return self - - -class Case(ast.Switch): - @deprecated("instead of `Case(test, { value: stmts })`, use `with m.Switch(test):` and " - "`with m.Case(value): stmts`; instead of `\"default\": stmts`, use " - "`with m.Case(): stmts`") - def __init__(self, test, cases): - new_cases = [] - default = None - for k, v in cases.items(): - if isinstance(k, (bool, int)): - k = Const(k) - if (not isinstance(k, Const) - and not (isinstance(k, str) and k == "default")): - raise TypeError("Case object is not a Migen constant") - if isinstance(k, str) and k == "default": - default = v - continue - else: - k = k.value - new_cases.append((k, v)) - if default is not None: - new_cases.append((None, default)) - super().__init__(test, OrderedDict(new_cases)) - - @deprecated("instead of `Case(...).makedefault()`, use an 
explicit default case: " - "`with m.Case(): ...`") - def makedefault(self, key=None): - if key is None: - for choice in self.cases.keys(): - if (key is None - or (isinstance(choice, str) and choice == "default") - or choice > key): - key = choice - elif isinstance(key, str) and key == "default": - key = () - else: - key = ("{:0{}b}".format(ast.Value.cast(key).value, len(self.test)),) - stmts = self.cases[key] - del self.cases[key] - self.cases[()] = stmts - return self +warnings.warn("instead of nmigen.compat.fhdl.structure, use amaranth.compat.fhdl.structure", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/fhdl/verilog.py b/nmigen/compat/fhdl/verilog.py index 3773cc6..f12add0 100644 --- a/nmigen/compat/fhdl/verilog.py +++ b/nmigen/compat/fhdl/verilog.py @@ -1,35 +1,6 @@ +from amaranth.compat.fhdl.verilog import * + + import warnings - -from ...hdl.ir import Fragment -from ...hdl.cd import ClockDomain -from ...back import verilog -from .conv_output import ConvOutput -from .module import Module - - -def convert(fi, ios=None, name="top", special_overrides=dict(), - attr_translate=None, create_clock_domains=True, - display_run=False): - if display_run: - warnings.warn("`display_run=True` support has been removed", - DeprecationWarning, stacklevel=1) - if special_overrides: - warnings.warn("`special_overrides` support as well as `Special` has been removed", - DeprecationWarning, stacklevel=1) - # TODO: attr_translate - - if isinstance(fi, Module): - fi = fi.get_fragment() - - def missing_domain(name): - if create_clock_domains: - return ClockDomain(name) - v_output = verilog.convert( - elaboratable=fi, - name=name, - ports=ios or (), - missing_domain=missing_domain - ) - output = ConvOutput() - output.set_main_source(v_output) - return output +warnings.warn("instead of nmigen.compat.fhdl.verilog, use amaranth.compat.fhdl.verilog", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/genlib/__init__.py b/nmigen/compat/genlib/__init__.py index 
e69de29..8ab81f2 100644 --- a/nmigen/compat/genlib/__init__.py +++ b/nmigen/compat/genlib/__init__.py @@ -0,0 +1,6 @@ +from amaranth.compat.genlib import * + + +import warnings +warnings.warn("instead of nmigen.compat.genlib, use amaranth.compat.genlib", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/genlib/cdc.py b/nmigen/compat/genlib/cdc.py index 9da98bd..61fb8bc 100644 --- a/nmigen/compat/genlib/cdc.py +++ b/nmigen/compat/genlib/cdc.py @@ -1,74 +1,7 @@ +from amaranth.compat.genlib.cdc import * +from amaranth.compat.genlib.cdc import __all__ + + import warnings - -from ..._utils import deprecated -from ...lib.cdc import FFSynchronizer as NativeFFSynchronizer -from ...lib.cdc import PulseSynchronizer as NativePulseSynchronizer -from ...hdl.ast import * -from ..fhdl.module import CompatModule -from ..fhdl.structure import If - - -__all__ = ["MultiReg", "PulseSynchronizer", "GrayCounter", "GrayDecoder"] - - -class MultiReg(NativeFFSynchronizer): - def __init__(self, i, o, odomain="sync", n=2, reset=0): - old_opts = [] - new_opts = [] - if odomain != "sync": - old_opts.append(", odomain={!r}".format(odomain)) - new_opts.append(", o_domain={!r}".format(odomain)) - if n != 2: - old_opts.append(", n={!r}".format(n)) - new_opts.append(", stages={!r}".format(n)) - warnings.warn("instead of `MultiReg(...{})`, use `FFSynchronizer(...{})`" - .format("".join(old_opts), "".join(new_opts)), - DeprecationWarning, stacklevel=2) - super().__init__(i, o, o_domain=odomain, stages=n, reset=reset) - self.odomain = odomain - - -@deprecated("instead of `migen.genlib.cdc.PulseSynchronizer`, use `nmigen.lib.cdc.PulseSynchronizer`") -class PulseSynchronizer(NativePulseSynchronizer): - def __init__(self, idomain, odomain): - super().__init__(i_domain=idomain, o_domain=odomain) - - -@deprecated("instead of `migen.genlib.cdc.GrayCounter`, use `nmigen.lib.coding.GrayEncoder`") -class GrayCounter(CompatModule): - def __init__(self, width): - self.ce = Signal() - self.q = 
Signal(width) - self.q_next = Signal(width) - self.q_binary = Signal(width) - self.q_next_binary = Signal(width) - - ### - - self.comb += [ - If(self.ce, - self.q_next_binary.eq(self.q_binary + 1) - ).Else( - self.q_next_binary.eq(self.q_binary) - ), - self.q_next.eq(self.q_next_binary ^ self.q_next_binary[1:]) - ] - self.sync += [ - self.q_binary.eq(self.q_next_binary), - self.q.eq(self.q_next) - ] - - -@deprecated("instead of `migen.genlib.cdc.GrayDecoder`, use `nmigen.lib.coding.GrayDecoder`") -class GrayDecoder(CompatModule): - def __init__(self, width): - self.i = Signal(width) - self.o = Signal(width, reset_less=True) - - # # # - - o_comb = Signal(width) - self.comb += o_comb[-1].eq(self.i[-1]) - for i in reversed(range(width-1)): - self.comb += o_comb[i].eq(o_comb[i+1] ^ self.i[i]) - self.sync += self.o.eq(o_comb) +warnings.warn("instead of nmigen.compat.genlib.cdc, use amaranth.compat.genlib.cdc", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/genlib/coding.py b/nmigen/compat/genlib/coding.py index 44154a2..51188ad 100644 --- a/nmigen/compat/genlib/coding.py +++ b/nmigen/compat/genlib/coding.py @@ -1,4 +1,7 @@ -from ...lib.coding import * +from amaranth.compat.genlib.coding import * +from amaranth.compat.genlib.coding import __all__ -__all__ = ["Encoder", "PriorityEncoder", "Decoder", "PriorityDecoder"] +import warnings +warnings.warn("instead of nmigen.compat.genlib.coding, use amaranth.compat.genlib.coding", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/genlib/fifo.py b/nmigen/compat/genlib/fifo.py index a944428..1760205 100644 --- a/nmigen/compat/genlib/fifo.py +++ b/nmigen/compat/genlib/fifo.py @@ -1,147 +1,7 @@ -from ..._utils import deprecated, extend -from ...lib.fifo import (FIFOInterface as NativeFIFOInterface, - SyncFIFO as NativeSyncFIFO, SyncFIFOBuffered as NativeSyncFIFOBuffered, - AsyncFIFO as NativeAsyncFIFO, AsyncFIFOBuffered as NativeAsyncFIFOBuffered) +from amaranth.compat.genlib.fifo import * +from 
amaranth.compat.genlib.fifo import __all__ -__all__ = ["_FIFOInterface", "SyncFIFO", "SyncFIFOBuffered", "AsyncFIFO", "AsyncFIFOBuffered"] - - -class CompatFIFOInterface(NativeFIFOInterface): - @deprecated("attribute `fwft` must be provided to FIFOInterface constructor") - def __init__(self, width, depth): - super().__init__(width=width, depth=depth, fwft=False) - del self.fwft - - -@extend(NativeFIFOInterface) -@property -@deprecated("instead of `fifo.din`, use `fifo.w_data`") -def din(self): - return self.w_data - - -@extend(NativeFIFOInterface) -@NativeFIFOInterface.din.setter -@deprecated("instead of `fifo.din = x`, use `fifo.w_data = x`") -def din(self, w_data): - self.w_data = w_data - - -@extend(NativeFIFOInterface) -@property -@deprecated("instead of `fifo.writable`, use `fifo.w_rdy`") -def writable(self): - return self.w_rdy - - -@extend(NativeFIFOInterface) -@NativeFIFOInterface.writable.setter -@deprecated("instead of `fifo.writable = x`, use `fifo.w_rdy = x`") -def writable(self, w_rdy): - self.w_rdy = w_rdy - - -@extend(NativeFIFOInterface) -@property -@deprecated("instead of `fifo.we`, use `fifo.w_en`") -def we(self): - return self.w_en - - -@extend(NativeFIFOInterface) -@NativeFIFOInterface.we.setter -@deprecated("instead of `fifo.we = x`, use `fifo.w_en = x`") -def we(self, w_en): - self.w_en = w_en - - -@extend(NativeFIFOInterface) -@property -@deprecated("instead of `fifo.dout`, use `fifo.r_data`") -def dout(self): - return self.r_data - - -@extend(NativeFIFOInterface) -@NativeFIFOInterface.dout.setter -@deprecated("instead of `fifo.dout = x`, use `fifo.r_data = x`") -def dout(self, r_data): - self.r_data = r_data - - -@extend(NativeFIFOInterface) -@property -@deprecated("instead of `fifo.readable`, use `fifo.r_rdy`") -def readable(self): - return self.r_rdy - - -@extend(NativeFIFOInterface) -@NativeFIFOInterface.readable.setter -@deprecated("instead of `fifo.readable = x`, use `fifo.r_rdy = x`") -def readable(self, r_rdy): - self.r_rdy = r_rdy - 
- -@extend(NativeFIFOInterface) -@property -@deprecated("instead of `fifo.re`, use `fifo.r_en`") -def re(self): - return self.r_en - - -@extend(NativeFIFOInterface) -@NativeFIFOInterface.re.setter -@deprecated("instead of `fifo.re = x`, use `fifo.r_en = x`") -def re(self, r_en): - self.r_en = r_en - - -@extend(NativeFIFOInterface) -def read(self): - """Read method for simulation.""" - assert (yield self.r_rdy) - value = (yield self.r_data) - yield self.r_en.eq(1) - yield - yield self.r_en.eq(0) - yield - return value - -@extend(NativeFIFOInterface) -def write(self, data): - """Write method for simulation.""" - assert (yield self.w_rdy) - yield self.w_data.eq(data) - yield self.w_en.eq(1) - yield - yield self.w_en.eq(0) - yield - - -class CompatSyncFIFO(NativeSyncFIFO): - def __init__(self, width, depth, fwft=True): - super().__init__(width=width, depth=depth, fwft=fwft) - - -class CompatSyncFIFOBuffered(NativeSyncFIFOBuffered): - def __init__(self, width, depth): - super().__init__(width=width, depth=depth) - - -class CompatAsyncFIFO(NativeAsyncFIFO): - def __init__(self, width, depth): - super().__init__(width=width, depth=depth) - - -class CompatAsyncFIFOBuffered(NativeAsyncFIFOBuffered): - def __init__(self, width, depth): - super().__init__(width=width, depth=depth) - - -_FIFOInterface = CompatFIFOInterface -SyncFIFO = CompatSyncFIFO -SyncFIFOBuffered = CompatSyncFIFOBuffered -AsyncFIFO = CompatAsyncFIFO -AsyncFIFOBuffered = CompatAsyncFIFOBuffered +import warnings +warnings.warn("instead of nmigen.compat.genlib.fifo, use amaranth.compat.genlib.fifo", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/genlib/fsm.py b/nmigen/compat/genlib/fsm.py index 6904550..7dc81f4 100644 --- a/nmigen/compat/genlib/fsm.py +++ b/nmigen/compat/genlib/fsm.py @@ -1,193 +1,7 @@ -from collections import OrderedDict - -from ..._utils import deprecated, _ignore_deprecated -from ...hdl.xfrm import ValueTransformer, StatementTransformer -from ...hdl.ast import * -from 
...hdl.ast import Signal as NativeSignal -from ..fhdl.module import CompatModule, CompatFinalizeError -from ..fhdl.structure import Signal, If, Case +from amaranth.compat.genlib.fsm import * +from amaranth.compat.genlib.fsm import __all__ -__all__ = ["AnonymousState", "NextState", "NextValue", "FSM"] - - -class AnonymousState: - pass - - -class NextState(Statement): - def __init__(self, state): - super().__init__() - self.state = state - - -class NextValue(Statement): - def __init__(self, target, value): - super().__init__() - self.target = target - self.value = value - - -def _target_eq(a, b): - if type(a) != type(b): - return False - ty = type(a) - if ty == Const: - return a.value == b.value - elif ty == NativeSignal or ty == Signal: - return a is b - elif ty == Cat: - return all(_target_eq(x, y) for x, y in zip(a.l, b.l)) - elif ty == Slice: - return (_target_eq(a.value, b.value) - and a.start == b.start - and a.stop == b.stop) - elif ty == Part: - return (_target_eq(a.value, b.value) - and _target_eq(a.offset == b.offset) - and a.width == b.width) - elif ty == ArrayProxy: - return (all(_target_eq(x, y) for x, y in zip(a.choices, b.choices)) - and _target_eq(a.key, b.key)) - else: - raise ValueError("NextValue cannot be used with target type '{}'" - .format(ty)) - - -class _LowerNext(ValueTransformer, StatementTransformer): - def __init__(self, next_state_signal, encoding, aliases): - self.next_state_signal = next_state_signal - self.encoding = encoding - self.aliases = aliases - # (target, next_value_ce, next_value) - self.registers = [] - - def _get_register_control(self, target): - for x in self.registers: - if _target_eq(target, x[0]): - return x[1], x[2] - raise KeyError - - def on_unknown_statement(self, node): - if isinstance(node, NextState): - try: - actual_state = self.aliases[node.state] - except KeyError: - actual_state = node.state - return self.next_state_signal.eq(self.encoding[actual_state]) - elif isinstance(node, NextValue): - try: - 
next_value_ce, next_value = self._get_register_control(node.target) - except KeyError: - related = node.target if isinstance(node.target, Signal) else None - next_value = Signal(node.target.shape(), - name=None if related is None else "{}_fsm_next".format(related.name)) - next_value_ce = Signal( - name=None if related is None else "{}_fsm_next_ce".format(related.name)) - self.registers.append((node.target, next_value_ce, next_value)) - return next_value.eq(node.value), next_value_ce.eq(1) - else: - return node - - -@deprecated("instead of `migen.genlib.fsm.FSM()`, use `with m.FSM():`; note that there is no " - "replacement for `{before,after}_{entering,leaving}` and `delayed_enter` methods") -class FSM(CompatModule): - def __init__(self, reset_state=None): - self.actions = OrderedDict() - self.state_aliases = dict() - self.reset_state = reset_state - - self.before_entering_signals = OrderedDict() - self.before_leaving_signals = OrderedDict() - self.after_entering_signals = OrderedDict() - self.after_leaving_signals = OrderedDict() - - def act(self, state, *statements): - if self.finalized: - raise CompatFinalizeError - if self.reset_state is None: - self.reset_state = state - if state not in self.actions: - self.actions[state] = [] - self.actions[state] += statements - - def delayed_enter(self, name, target, delay): - if self.finalized: - raise CompatFinalizeError - if delay > 0: - state = name - for i in range(delay): - if i == delay - 1: - next_state = target - else: - next_state = AnonymousState() - self.act(state, NextState(next_state)) - state = next_state - else: - self.state_aliases[name] = target - - def ongoing(self, state): - is_ongoing = Signal() - self.act(state, is_ongoing.eq(1)) - return is_ongoing - - def _get_signal(self, d, state): - if state not in self.actions: - self.actions[state] = [] - try: - return d[state] - except KeyError: - is_el = Signal() - d[state] = is_el - return is_el - - def before_entering(self, state): - return 
self._get_signal(self.before_entering_signals, state) - - def before_leaving(self, state): - return self._get_signal(self.before_leaving_signals, state) - - def after_entering(self, state): - signal = self._get_signal(self.after_entering_signals, state) - self.sync += signal.eq(self.before_entering(state)) - return signal - - def after_leaving(self, state): - signal = self._get_signal(self.after_leaving_signals, state) - self.sync += signal.eq(self.before_leaving(state)) - return signal - - @_ignore_deprecated - def do_finalize(self): - nstates = len(self.actions) - self.encoding = dict((s, n) for n, s in enumerate(self.actions.keys())) - self.decoding = {n: s for s, n in self.encoding.items()} - - decoder = lambda n: "{}/{}".format(self.decoding[n], n) - self.state = Signal(range(nstates), reset=self.encoding[self.reset_state], decoder=decoder) - self.next_state = Signal.like(self.state) - - for state, signal in self.before_leaving_signals.items(): - encoded = self.encoding[state] - self.comb += signal.eq((self.state == encoded) & ~(self.next_state == encoded)) - if self.reset_state in self.after_entering_signals: - self.after_entering_signals[self.reset_state].reset = 1 - for state, signal in self.before_entering_signals.items(): - encoded = self.encoding[state] - self.comb += signal.eq(~(self.state == encoded) & (self.next_state == encoded)) - - self._finalize_sync(self._lower_controls()) - - def _lower_controls(self): - return _LowerNext(self.next_state, self.encoding, self.state_aliases) - - def _finalize_sync(self, ls): - cases = dict((self.encoding[k], ls.on_statement(v)) for k, v in self.actions.items() if v) - self.comb += [ - self.next_state.eq(self.state), - Case(self.state, cases).makedefault(self.encoding[self.reset_state]) - ] - self.sync += self.state.eq(self.next_state) - for register, next_value_ce, next_value in ls.registers: - self.sync += If(next_value_ce, register.eq(next_value)) +import warnings +warnings.warn("instead of 
nmigen.compat.genlib.fsm, use amaranth.compat.genlib.fsm", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/genlib/record.py b/nmigen/compat/genlib/record.py index 69ce0e2..6cd5b23 100644 --- a/nmigen/compat/genlib/record.py +++ b/nmigen/compat/genlib/record.py @@ -1,195 +1,6 @@ -from ...tracer import * -from ..fhdl.structure import * - -from functools import reduce -from operator import or_ +from amaranth.compat.genlib.record import * -(DIR_NONE, DIR_S_TO_M, DIR_M_TO_S) = range(3) - -# Possible layout elements: -# 1. (name, size) -# 2. (name, size, direction) -# 3. (name, sublayout) -# size can be an int, or a (int, bool) tuple for signed numbers -# sublayout must be a list - - -def set_layout_parameters(layout, **layout_dict): - def resolve(p): - if isinstance(p, str): - try: - return layout_dict[p] - except KeyError: - return p - else: - return p - - r = [] - for f in layout: - if isinstance(f[1], (int, tuple, str)): # cases 1/2 - if len(f) == 3: - r.append((f[0], resolve(f[1]), f[2])) - else: - r.append((f[0], resolve(f[1]))) - elif isinstance(f[1], list): # case 3 - r.append((f[0], set_layout_parameters(f[1], **layout_dict))) - else: - raise TypeError - return r - - -def layout_len(layout): - r = 0 - for f in layout: - if isinstance(f[1], (int, tuple)): # cases 1/2 - if len(f) == 3: - fname, fsize, fdirection = f - else: - fname, fsize = f - elif isinstance(f[1], list): # case 3 - fname, fsublayout = f - fsize = layout_len(fsublayout) - else: - raise TypeError - if isinstance(fsize, tuple): - r += fsize[0] - else: - r += fsize - return r - - -def layout_get(layout, name): - for f in layout: - if f[0] == name: - return f - raise KeyError(name) - - -def layout_partial(layout, *elements): - r = [] - for path in elements: - path_s = path.split("/") - last = path_s.pop() - copy_ref = layout - insert_ref = r - for hop in path_s: - name, copy_ref = layout_get(copy_ref, hop) - try: - name, insert_ref = layout_get(insert_ref, hop) - except KeyError: - 
new_insert_ref = [] - insert_ref.append((hop, new_insert_ref)) - insert_ref = new_insert_ref - insert_ref.append(layout_get(copy_ref, last)) - return r - - -class Record: - def __init__(self, layout, name=None, **kwargs): - try: - self.name = get_var_name() - except NameNotFound: - self.name = "" - self.layout = layout - - if self.name: - prefix = self.name + "_" - else: - prefix = "" - for f in self.layout: - if isinstance(f[1], (int, tuple)): # cases 1/2 - if(len(f) == 3): - fname, fsize, fdirection = f - else: - fname, fsize = f - finst = Signal(fsize, name=prefix + fname, **kwargs) - elif isinstance(f[1], list): # case 3 - fname, fsublayout = f - finst = Record(fsublayout, prefix + fname, **kwargs) - else: - raise TypeError - setattr(self, fname, finst) - - def eq(self, other): - return [getattr(self, f[0]).eq(getattr(other, f[0])) - for f in self.layout if hasattr(other, f[0])] - - def iter_flat(self): - for f in self.layout: - e = getattr(self, f[0]) - if isinstance(e, Signal): - if len(f) == 3: - yield e, f[2] - else: - yield e, DIR_NONE - elif isinstance(e, Record): - yield from e.iter_flat() - else: - raise TypeError - - def flatten(self): - return [signal for signal, direction in self.iter_flat()] - - def raw_bits(self): - return Cat(*self.flatten()) - - def connect(self, *slaves, keep=None, omit=None): - if keep is None: - _keep = set([f[0] for f in self.layout]) - elif isinstance(keep, list): - _keep = set(keep) - else: - _keep = keep - if omit is None: - _omit = set() - elif isinstance(omit, list): - _omit = set(omit) - else: - _omit = omit - - _keep = _keep - _omit - - r = [] - for f in self.layout: - field = f[0] - self_e = getattr(self, field) - if isinstance(self_e, Signal): - if field in _keep: - direction = f[2] - if direction == DIR_M_TO_S: - r += [getattr(slave, field).eq(self_e) for slave in slaves] - elif direction == DIR_S_TO_M: - r.append(self_e.eq(reduce(or_, [getattr(slave, field) for slave in slaves]))) - else: - raise TypeError - else: 
- for slave in slaves: - r += self_e.connect(getattr(slave, field), keep=keep, omit=omit) - return r - - def connect_flat(self, *slaves): - r = [] - iter_slaves = [slave.iter_flat() for slave in slaves] - for m_signal, m_direction in self.iter_flat(): - if m_direction == DIR_M_TO_S: - for iter_slave in iter_slaves: - s_signal, s_direction = next(iter_slave) - assert(s_direction == DIR_M_TO_S) - r.append(s_signal.eq(m_signal)) - elif m_direction == DIR_S_TO_M: - s_signals = [] - for iter_slave in iter_slaves: - s_signal, s_direction = next(iter_slave) - assert(s_direction == DIR_S_TO_M) - s_signals.append(s_signal) - r.append(m_signal.eq(reduce(or_, s_signals))) - else: - raise TypeError - return r - - def __len__(self): - return layout_len(self.layout) - - def __repr__(self): - return "" +import warnings +warnings.warn("instead of nmigen.compat.genlib.record, use amaranth.compat.genlib.record", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/genlib/resetsync.py b/nmigen/compat/genlib/resetsync.py index 66f0bae..7577cae 100644 --- a/nmigen/compat/genlib/resetsync.py +++ b/nmigen/compat/genlib/resetsync.py @@ -1,16 +1,7 @@ -from ..._utils import deprecated -from ...lib.cdc import ResetSynchronizer as NativeResetSynchronizer +from amaranth.compat.genlib.resetsync import * +from amaranth.compat.genlib.resetsync import __all__ -__all__ = ["AsyncResetSynchronizer"] - - -@deprecated("instead of `migen.genlib.resetsync.AsyncResetSynchronizer`, " - "use `nmigen.lib.cdc.ResetSynchronizer`; note that ResetSynchronizer accepts " - "a clock domain name as an argument, not a clock domain object") -class CompatResetSynchronizer(NativeResetSynchronizer): - def __init__(self, cd, async_reset): - super().__init__(async_reset, domain=cd.name) - - -AsyncResetSynchronizer = CompatResetSynchronizer +import warnings +warnings.warn("instead of nmigen.compat.genlib.resetsync, use amaranth.compat.genlib.resetsync", + DeprecationWarning, stacklevel=2) diff --git 
a/nmigen/compat/genlib/roundrobin.py b/nmigen/compat/genlib/roundrobin.py index 3333900..72f1675 100644 --- a/nmigen/compat/genlib/roundrobin.py +++ b/nmigen/compat/genlib/roundrobin.py @@ -1,58 +1,7 @@ +from amaranth.compat.genlib.roundrobin import * +from amaranth.compat.genlib.roundrobin import __all__ + + import warnings - -from ..fhdl.structure import Signal, If, Case -from ..fhdl.module import CompatModule - - -__all__ = ["RoundRobin", "SP_WITHDRAW", "SP_CE"] - -(SP_WITHDRAW, SP_CE) = range(2) - -class CompatRoundRobin(CompatModule): - def __init__(self, n, switch_policy=SP_WITHDRAW): - self.request = Signal(n) - self.grant = Signal(max=max(2, n)) - self.switch_policy = switch_policy - if self.switch_policy == SP_CE: - warnings.warn("instead of `migen.genlib.roundrobin.RoundRobin`, " - "use `nmigen.lib.scheduler.RoundRobin`; note that RoundRobin does not " - "require a policy anymore but to get the same behavior as SP_CE you" - "should use an EnableInserter", - DeprecationWarning, stacklevel=1) - self.ce = Signal() - else: - warnings.warn("instead of `migen.genlib.roundrobin.RoundRobin`, " - "use `nmigen.lib.scheduler.RoundRobin`; note that RoundRobin does not " - "require a policy anymore", - DeprecationWarning, stacklevel=1) - - ### - - if n > 1: - cases = {} - for i in range(n): - switch = [] - for j in reversed(range(i+1, i+n)): - t = j % n - switch = [ - If(self.request[t], - self.grant.eq(t) - ).Else( - *switch - ) - ] - if self.switch_policy == SP_WITHDRAW: - case = [If(~self.request[i], *switch)] - else: - case = switch - cases[i] = case - statement = Case(self.grant, cases) - if self.switch_policy == SP_CE: - statement = If(self.ce, statement) - self.sync += statement - else: - self.comb += self.grant.eq(0) - - - -RoundRobin = CompatRoundRobin +warnings.warn("instead of nmigen.compat.genlib.roundrobin, use amaranth.compat.genlib.roundrobin", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/compat/sim/__init__.py b/nmigen/compat/sim/__init__.py 
index 4c3d063..529cceb 100644 --- a/nmigen/compat/sim/__init__.py +++ b/nmigen/compat/sim/__init__.py @@ -1,54 +1,7 @@ -import functools -import inspect -from collections.abc import Iterable -from ...hdl.cd import ClockDomain -from ...hdl.ir import Fragment -from ...sim import * +from amaranth.compat.sim import * +from amaranth.compat.sim import __all__ -__all__ = ["run_simulation", "passive"] - - -def run_simulation(fragment_or_module, generators, clocks={"sync": 10}, vcd_name=None, - special_overrides={}): - assert not special_overrides - - if hasattr(fragment_or_module, "get_fragment"): - fragment = fragment_or_module.get_fragment() - else: - fragment = fragment_or_module - - fragment = Fragment.get(fragment, platform=None) - - if not isinstance(generators, dict): - generators = {"sync": generators} - if "sync" not in fragment.domains: - fragment.add_domains(ClockDomain("sync")) - - sim = Simulator(fragment) - for domain, period in clocks.items(): - sim.add_clock(period / 1e9, domain=domain) - for domain, processes in generators.items(): - def wrap(process): - def wrapper(): - yield from process - return wrapper - if isinstance(processes, Iterable) and not inspect.isgenerator(processes): - for process in processes: - sim.add_sync_process(wrap(process), domain=domain) - else: - sim.add_sync_process(wrap(processes), domain=domain) - - if vcd_name is not None: - with sim.write_vcd(vcd_name): - sim.run() - else: - sim.run() - - -def passive(generator): - @functools.wraps(generator) - def wrapper(*args, **kwargs): - yield Passive() - yield from generator(*args, **kwargs) - return wrapper +import warnings +warnings.warn("instead of nmigen.compat.sim, use amaranth.compat.sim", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/hdl/__init__.py b/nmigen/hdl/__init__.py index 770fc25..5a2fa04 100644 --- a/nmigen/hdl/__init__.py +++ b/nmigen/hdl/__init__.py @@ -1,20 +1,7 @@ -from .ast import Shape, unsigned, signed -from .ast import Value, Const, C, Mux, Cat, Repl, 
Array, Signal, ClockSignal, ResetSignal -from .dsl import Module -from .cd import ClockDomain -from .ir import Elaboratable, Fragment, Instance -from .mem import Memory -from .rec import Record -from .xfrm import DomainRenamer, ResetInserter, EnableInserter +from amaranth.hdl import * +from amaranth.hdl import __all__ -__all__ = [ - "Shape", "unsigned", "signed", - "Value", "Const", "C", "Mux", "Cat", "Repl", "Array", "Signal", "ClockSignal", "ResetSignal", - "Module", - "ClockDomain", - "Elaboratable", "Fragment", "Instance", - "Memory", - "Record", - "DomainRenamer", "ResetInserter", "EnableInserter", -] +import warnings +warnings.warn("instead of nmigen.hdl, use amaranth.hdl", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/hdl/ast.py b/nmigen/hdl/ast.py index ae010a0..b743345 100644 --- a/nmigen/hdl/ast.py +++ b/nmigen/hdl/ast.py @@ -1,1775 +1,7 @@ -from abc import ABCMeta, abstractmethod +from amaranth.hdl.ast import * +from amaranth.hdl.ast import __all__ + + import warnings -import functools -from collections import OrderedDict -from collections.abc import Iterable, MutableMapping, MutableSet, MutableSequence -from enum import Enum -from itertools import chain - -from .. import tracer -from .._utils import * -from .._unused import * - - -__all__ = [ - "Shape", "signed", "unsigned", - "Value", "Const", "C", "AnyConst", "AnySeq", "Operator", "Mux", "Part", "Slice", "Cat", "Repl", - "Array", "ArrayProxy", - "Signal", "ClockSignal", "ResetSignal", - "UserValue", "ValueCastable", - "Sample", "Past", "Stable", "Rose", "Fell", "Initial", - "Statement", "Switch", - "Property", "Assign", "Assert", "Assume", "Cover", - "ValueKey", "ValueDict", "ValueSet", "SignalKey", "SignalDict", "SignalSet", -] - - -class DUID: - """Deterministic Unique IDentifier.""" - __next_uid = 0 - def __init__(self): - self.duid = DUID.__next_uid - DUID.__next_uid += 1 - - -class Shape: - """Bit width and signedness of a value. 
- - A ``Shape`` can be constructed using: - * explicit bit width and signedness; - * aliases :func:`signed` and :func:`unsigned`; - * casting from a variety of objects. - - A ``Shape`` can be cast from: - * an integer, where the integer specifies the bit width; - * a range, where the result is wide enough to represent any element of the range, and is - signed if any element of the range is signed; - * an :class:`Enum` with all integer members or :class:`IntEnum`, where the result is wide - enough to represent any member of the enumeration, and is signed if any member of - the enumeration is signed. - - Parameters - ---------- - width : int - The number of bits in the representation, including the sign bit (if any). - signed : bool - If ``False``, the value is unsigned. If ``True``, the value is signed two's complement. - """ - def __init__(self, width=1, signed=False): - if not isinstance(width, int) or width < 0: - raise TypeError("Width must be a non-negative integer, not {!r}" - .format(width)) - self.width = width - self.signed = signed - - def __iter__(self): - return iter((self.width, self.signed)) - - @staticmethod - def cast(obj, *, src_loc_at=0): - if isinstance(obj, Shape): - return obj - if isinstance(obj, int): - return Shape(obj) - if isinstance(obj, tuple): - width, signed = obj - warnings.warn("instead of `{tuple}`, use `{constructor}({width})`" - .format(constructor="signed" if signed else "unsigned", width=width, - tuple=obj), - DeprecationWarning, stacklevel=2 + src_loc_at) - return Shape(width, signed) - if isinstance(obj, range): - if len(obj) == 0: - return Shape(0, obj.start < 0) - signed = obj.start < 0 or (obj.stop - obj.step) < 0 - width = max(bits_for(obj.start, signed), - bits_for(obj.stop - obj.step, signed)) - return Shape(width, signed) - if isinstance(obj, type) and issubclass(obj, Enum): - min_value = min(member.value for member in obj) - max_value = max(member.value for member in obj) - if not isinstance(min_value, int) or not 
isinstance(max_value, int): - raise TypeError("Only enumerations with integer values can be used " - "as value shapes") - signed = min_value < 0 or max_value < 0 - width = max(bits_for(min_value, signed), bits_for(max_value, signed)) - return Shape(width, signed) - raise TypeError("Object {!r} cannot be used as value shape".format(obj)) - - def __repr__(self): - if self.signed: - return "signed({})".format(self.width) - else: - return "unsigned({})".format(self.width) - - def __eq__(self, other): - if isinstance(other, tuple) and len(other) == 2: - width, signed = other - if isinstance(width, int) and isinstance(signed, bool): - return self.width == width and self.signed == signed - else: - raise TypeError("Shapes may be compared with other Shapes and (int, bool) tuples, " - "not {!r}" - .format(other)) - if not isinstance(other, Shape): - raise TypeError("Shapes may be compared with other Shapes and (int, bool) tuples, " - "not {!r}" - .format(other)) - return self.width == other.width and self.signed == other.signed - - -def unsigned(width): - """Shorthand for ``Shape(width, signed=False)``.""" - return Shape(width, signed=False) - - -def signed(width): - """Shorthand for ``Shape(width, signed=True)``.""" - return Shape(width, signed=True) - - -class Value(metaclass=ABCMeta): - @staticmethod - def cast(obj): - """Converts ``obj`` to an nMigen value. - - Booleans and integers are wrapped into a :class:`Const`. Enumerations whose members are - all integers are converted to a :class:`Const` with a shape that fits every member. 
- """ - if isinstance(obj, Value): - return obj - if isinstance(obj, int): - return Const(obj) - if isinstance(obj, Enum): - return Const(obj.value, Shape.cast(type(obj))) - if isinstance(obj, ValueCastable): - return obj.as_value() - raise TypeError("Object {!r} cannot be converted to an nMigen value".format(obj)) - - def __init__(self, *, src_loc_at=0): - super().__init__() - self.src_loc = tracer.get_src_loc(1 + src_loc_at) - - def __bool__(self): - raise TypeError("Attempted to convert nMigen value to Python boolean") - - def __invert__(self): - return Operator("~", [self]) - def __neg__(self): - return Operator("-", [self]) - - def __add__(self, other): - return Operator("+", [self, other]) - def __radd__(self, other): - return Operator("+", [other, self]) - def __sub__(self, other): - return Operator("-", [self, other]) - def __rsub__(self, other): - return Operator("-", [other, self]) - - def __mul__(self, other): - return Operator("*", [self, other]) - def __rmul__(self, other): - return Operator("*", [other, self]) - - def __check_divisor(self): - width, signed = self.shape() - if signed: - # Python's division semantics and Verilog's division semantics differ for negative - # divisors (Python uses div/mod, Verilog uses quo/rem); for now, avoid the issue - # completely by prohibiting such division operations. 
- raise NotImplementedError("Division by a signed value is not supported") - def __mod__(self, other): - other = Value.cast(other) - other.__check_divisor() - return Operator("%", [self, other]) - def __rmod__(self, other): - self.__check_divisor() - return Operator("%", [other, self]) - def __floordiv__(self, other): - other = Value.cast(other) - other.__check_divisor() - return Operator("//", [self, other]) - def __rfloordiv__(self, other): - self.__check_divisor() - return Operator("//", [other, self]) - - def __check_shamt(self): - width, signed = self.shape() - if signed: - # Neither Python nor HDLs implement shifts by negative values; prohibit any shifts - # by a signed value to make sure the shift amount can always be interpreted as - # an unsigned value. - raise TypeError("Shift amount must be unsigned") - def __lshift__(self, other): - other = Value.cast(other) - other.__check_shamt() - return Operator("<<", [self, other]) - def __rlshift__(self, other): - self.__check_shamt() - return Operator("<<", [other, self]) - def __rshift__(self, other): - other = Value.cast(other) - other.__check_shamt() - return Operator(">>", [self, other]) - def __rrshift__(self, other): - self.__check_shamt() - return Operator(">>", [other, self]) - - def __and__(self, other): - return Operator("&", [self, other]) - def __rand__(self, other): - return Operator("&", [other, self]) - def __xor__(self, other): - return Operator("^", [self, other]) - def __rxor__(self, other): - return Operator("^", [other, self]) - def __or__(self, other): - return Operator("|", [self, other]) - def __ror__(self, other): - return Operator("|", [other, self]) - - def __eq__(self, other): - return Operator("==", [self, other]) - def __ne__(self, other): - return Operator("!=", [self, other]) - def __lt__(self, other): - return Operator("<", [self, other]) - def __le__(self, other): - return Operator("<=", [self, other]) - def __gt__(self, other): - return Operator(">", [self, other]) - def 
__ge__(self, other): - return Operator(">=", [self, other]) - - def __abs__(self): - width, signed = self.shape() - if signed: - return Mux(self >= 0, self, -self) - else: - return self - - def __len__(self): - return self.shape().width - - def __getitem__(self, key): - n = len(self) - if isinstance(key, int): - if key not in range(-n, n): - raise IndexError(f"Index {key} is out of bounds for a {n}-bit value") - if key < 0: - key += n - return Slice(self, key, key + 1) - elif isinstance(key, slice): - start, stop, step = key.indices(n) - if step != 1: - return Cat(self[i] for i in range(start, stop, step)) - return Slice(self, start, stop) - else: - raise TypeError("Cannot index value with {}".format(repr(key))) - - def as_unsigned(self): - """Conversion to unsigned. - - Returns - ------- - Value, out - This ``Value`` reinterpreted as a unsigned integer. - """ - return Operator("u", [self]) - - def as_signed(self): - """Conversion to signed. - - Returns - ------- - Value, out - This ``Value`` reinterpreted as a signed integer. - """ - return Operator("s", [self]) - - def bool(self): - """Conversion to boolean. - - Returns - ------- - Value, out - ``1`` if any bits are set, ``0`` otherwise. - """ - return Operator("b", [self]) - - def any(self): - """Check if any bits are ``1``. - - Returns - ------- - Value, out - ``1`` if any bits are set, ``0`` otherwise. - """ - return Operator("r|", [self]) - - def all(self): - """Check if all bits are ``1``. - - Returns - ------- - Value, out - ``1`` if all bits are set, ``0`` otherwise. - """ - return Operator("r&", [self]) - - def xor(self): - """Compute pairwise exclusive-or of every bit. - - Returns - ------- - Value, out - ``1`` if an odd number of bits are set, ``0`` if an even number of bits are set. - """ - return Operator("r^", [self]) - - def implies(premise, conclusion): - """Implication. - - Returns - ------- - Value, out - ``0`` if ``premise`` is true and ``conclusion`` is not, ``1`` otherwise. 
- """ - return ~premise | conclusion - - def bit_select(self, offset, width): - """Part-select with bit granularity. - - Selects a constant width but variable offset part of a ``Value``, such that successive - parts overlap by all but 1 bit. - - Parameters - ---------- - offset : Value, int - Index of first selected bit. - width : int - Number of selected bits. - - Returns - ------- - Part, out - Selected part of the ``Value`` - """ - offset = Value.cast(offset) - if type(offset) is Const and isinstance(width, int): - return self[offset.value:offset.value + width] - return Part(self, offset, width, stride=1, src_loc_at=1) - - def word_select(self, offset, width): - """Part-select with word granularity. - - Selects a constant width but variable offset part of a ``Value``, such that successive - parts do not overlap. - - Parameters - ---------- - offset : Value, int - Index of first selected word. - width : int - Number of selected bits. - - Returns - ------- - Part, out - Selected part of the ``Value`` - """ - offset = Value.cast(offset) - if type(offset) is Const and isinstance(width, int): - return self[offset.value * width:(offset.value + 1) * width] - return Part(self, offset, width, stride=width, src_loc_at=1) - - def matches(self, *patterns): - """Pattern matching. - - Matches against a set of patterns, which may be integers or bit strings, recognizing - the same grammar as ``Case()``. - - Parameters - ---------- - patterns : int or str - Patterns to match against. - - Returns - ------- - Value, out - ``1`` if any pattern matches the value, ``0`` otherwise. 
- """ - matches = [] - for pattern in patterns: - if not isinstance(pattern, (int, str, Enum)): - raise SyntaxError("Match pattern must be an integer, a string, or an enumeration, " - "not {!r}" - .format(pattern)) - if isinstance(pattern, str) and any(bit not in "01- \t" for bit in pattern): - raise SyntaxError("Match pattern '{}' must consist of 0, 1, and - (don't care) " - "bits, and may include whitespace" - .format(pattern)) - if (isinstance(pattern, str) and - len("".join(pattern.split())) != len(self)): - raise SyntaxError("Match pattern '{}' must have the same width as match value " - "(which is {})" - .format(pattern, len(self))) - if isinstance(pattern, int) and bits_for(pattern) > len(self): - warnings.warn("Match pattern '{:b}' is wider than match value " - "(which has width {}); comparison will never be true" - .format(pattern, len(self)), - SyntaxWarning, stacklevel=3) - continue - if isinstance(pattern, str): - pattern = "".join(pattern.split()) # remove whitespace - mask = int(pattern.replace("0", "1").replace("-", "0"), 2) - pattern = int(pattern.replace("-", "0"), 2) - matches.append((self & mask) == pattern) - elif isinstance(pattern, int): - matches.append(self == pattern) - elif isinstance(pattern, Enum): - matches.append(self == pattern.value) - else: - assert False - if not matches: - return Const(0) - elif len(matches) == 1: - return matches[0] - else: - return Cat(*matches).any() - - def shift_left(self, amount): - """Shift left by constant amount. - - Parameters - ---------- - amount : int - Amount to shift by. - - Returns - ------- - Value, out - If the amount is positive, the input shifted left. Otherwise, the input shifted right. 
- """ - if not isinstance(amount, int): - raise TypeError("Shift amount must be an integer, not {!r}".format(amount)) - if amount < 0: - return self.shift_right(-amount) - if self.shape().signed: - return Cat(Const(0, amount), self).as_signed() - else: - return Cat(Const(0, amount), self) # unsigned - - def shift_right(self, amount): - """Shift right by constant amount. - - Parameters - ---------- - amount : int - Amount to shift by. - - Returns - ------- - Value, out - If the amount is positive, the input shifted right. Otherwise, the input shifted left. - """ - if not isinstance(amount, int): - raise TypeError("Shift amount must be an integer, not {!r}".format(amount)) - if amount < 0: - return self.shift_left(-amount) - if self.shape().signed: - return self[amount:].as_signed() - else: - return self[amount:] # unsigned - - def rotate_left(self, amount): - """Rotate left by constant amount. - - Parameters - ---------- - amount : int - Amount to rotate by. - - Returns - ------- - Value, out - If the amount is positive, the input rotated left. Otherwise, the input rotated right. - """ - if not isinstance(amount, int): - raise TypeError("Rotate amount must be an integer, not {!r}".format(amount)) - amount %= len(self) - return Cat(self[-amount:], self[:-amount]) # meow :3 - - def rotate_right(self, amount): - """Rotate right by constant amount. - - Parameters - ---------- - amount : int - Amount to rotate by. - - Returns - ------- - Value, out - If the amount is positive, the input rotated right. Otherwise, the input rotated right. - """ - if not isinstance(amount, int): - raise TypeError("Rotate amount must be an integer, not {!r}".format(amount)) - amount %= len(self) - return Cat(self[amount:], self[:amount]) - - def eq(self, value): - """Assignment. - - Parameters - ---------- - value : Value, in - Value to be assigned. - - Returns - ------- - Assign - Assignment statement that can be used in combinatorial or synchronous context. 
- """ - return Assign(self, value, src_loc_at=1) - - @abstractmethod - def shape(self): - """Bit width and signedness of a value. - - Returns - ------- - Shape - See :class:`Shape`. - - Examples - -------- - >>> Signal(8).shape() - Shape(width=8, signed=False) - >>> Const(0xaa).shape() - Shape(width=8, signed=False) - """ - pass # :nocov: - - def _lhs_signals(self): - raise TypeError("Value {!r} cannot be used in assignments".format(self)) - - @abstractmethod - def _rhs_signals(self): - pass # :nocov: - - def _as_const(self): - raise TypeError("Value {!r} cannot be evaluated as constant".format(self)) - - __hash__ = None - - -@final -class Const(Value): - """A constant, literal integer value. - - Parameters - ---------- - value : int - shape : int or tuple or None - Either an integer ``width`` or a tuple ``(width, signed)`` specifying the number of bits - in this constant and whether it is signed (can represent negative values). - ``shape`` defaults to the minimum possible width and signedness of ``value``. - - Attributes - ---------- - width : int - signed : bool - """ - src_loc = None - - @staticmethod - def normalize(value, shape): - width, signed = shape - mask = (1 << width) - 1 - value &= mask - if signed and value >> (width - 1): - value |= ~mask - return value - - def __init__(self, value, shape=None, *, src_loc_at=0): - # We deliberately do not call Value.__init__ here. 
- self.value = int(value) - if shape is None: - shape = Shape(bits_for(self.value), signed=self.value < 0) - elif isinstance(shape, int): - shape = Shape(shape, signed=self.value < 0) - else: - shape = Shape.cast(shape, src_loc_at=1 + src_loc_at) - self.width, self.signed = shape - self.value = self.normalize(self.value, shape) - - def shape(self): - return Shape(self.width, self.signed) - - def _rhs_signals(self): - return SignalSet() - - def _as_const(self): - return self.value - - def __repr__(self): - return "(const {}'{}d{})".format(self.width, "s" if self.signed else "", self.value) - - -C = Const # shorthand - - -class AnyValue(Value, DUID): - def __init__(self, shape, *, src_loc_at=0): - super().__init__(src_loc_at=src_loc_at) - self.width, self.signed = Shape.cast(shape, src_loc_at=1 + src_loc_at) - if not isinstance(self.width, int) or self.width < 0: - raise TypeError("Width must be a non-negative integer, not {!r}" - .format(self.width)) - - def shape(self): - return Shape(self.width, self.signed) - - def _rhs_signals(self): - return SignalSet() - - -@final -class AnyConst(AnyValue): - def __repr__(self): - return "(anyconst {}'{})".format(self.width, "s" if self.signed else "") - - -@final -class AnySeq(AnyValue): - def __repr__(self): - return "(anyseq {}'{})".format(self.width, "s" if self.signed else "") - - -@final -class Operator(Value): - def __init__(self, operator, operands, *, src_loc_at=0): - super().__init__(src_loc_at=1 + src_loc_at) - self.operator = operator - self.operands = [Value.cast(op) for op in operands] - - def shape(self): - def _bitwise_binary_shape(a_shape, b_shape): - a_bits, a_sign = a_shape - b_bits, b_sign = b_shape - if not a_sign and not b_sign: - # both operands unsigned - return Shape(max(a_bits, b_bits), False) - elif a_sign and b_sign: - # both operands signed - return Shape(max(a_bits, b_bits), True) - elif not a_sign and b_sign: - # first operand unsigned (add sign bit), second operand signed - return 
Shape(max(a_bits + 1, b_bits), True) - else: - # first signed, second operand unsigned (add sign bit) - return Shape(max(a_bits, b_bits + 1), True) - - op_shapes = list(map(lambda x: x.shape(), self.operands)) - if len(op_shapes) == 1: - (a_width, a_signed), = op_shapes - if self.operator in ("+", "~"): - return Shape(a_width, a_signed) - if self.operator == "-": - return Shape(a_width + 1, True) - if self.operator in ("b", "r|", "r&", "r^"): - return Shape(1, False) - if self.operator == "u": - return Shape(a_width, False) - if self.operator == "s": - return Shape(a_width, True) - elif len(op_shapes) == 2: - (a_width, a_signed), (b_width, b_signed) = op_shapes - if self.operator in ("+", "-"): - width, signed = _bitwise_binary_shape(*op_shapes) - return Shape(width + 1, signed) - if self.operator == "*": - return Shape(a_width + b_width, a_signed or b_signed) - if self.operator in ("//", "%"): - assert not b_signed - return Shape(a_width, a_signed) - if self.operator in ("<", "<=", "==", "!=", ">", ">="): - return Shape(1, False) - if self.operator in ("&", "^", "|"): - return _bitwise_binary_shape(*op_shapes) - if self.operator == "<<": - assert not b_signed - return Shape(a_width + 2 ** b_width - 1, a_signed) - if self.operator == ">>": - assert not b_signed - return Shape(a_width, a_signed) - elif len(op_shapes) == 3: - if self.operator == "m": - s_shape, a_shape, b_shape = op_shapes - return _bitwise_binary_shape(a_shape, b_shape) - raise NotImplementedError("Operator {}/{} not implemented" - .format(self.operator, len(op_shapes))) # :nocov: - - def _rhs_signals(self): - return union(op._rhs_signals() for op in self.operands) - - def __repr__(self): - return "({} {})".format(self.operator, " ".join(map(repr, self.operands))) - - -def Mux(sel, val1, val0): - """Choose between two values. - - Parameters - ---------- - sel : Value, in - Selector. - val1 : Value, in - val0 : Value, in - Input values. - - Returns - ------- - Value, out - Output ``Value``. 
If ``sel`` is asserted, the Mux returns ``val1``, else ``val0``. - """ - return Operator("m", [sel, val1, val0]) - - -@final -class Slice(Value): - def __init__(self, value, start, stop, *, src_loc_at=0): - if not isinstance(start, int): - raise TypeError("Slice start must be an integer, not {!r}".format(start)) - if not isinstance(stop, int): - raise TypeError("Slice stop must be an integer, not {!r}".format(stop)) - - n = len(value) - if start not in range(-(n+1), n+1): - raise IndexError("Cannot start slice {} bits into {}-bit value".format(start, n)) - if start < 0: - start += n - if stop not in range(-(n+1), n+1): - raise IndexError("Cannot stop slice {} bits into {}-bit value".format(stop, n)) - if stop < 0: - stop += n - if start > stop: - raise IndexError("Slice start {} must be less than slice stop {}".format(start, stop)) - - super().__init__(src_loc_at=src_loc_at) - self.value = Value.cast(value) - self.start = int(start) - self.stop = int(stop) - - def shape(self): - return Shape(self.stop - self.start) - - def _lhs_signals(self): - return self.value._lhs_signals() - - def _rhs_signals(self): - return self.value._rhs_signals() - - def __repr__(self): - return "(slice {} {}:{})".format(repr(self.value), self.start, self.stop) - - -@final -class Part(Value): - def __init__(self, value, offset, width, stride=1, *, src_loc_at=0): - if not isinstance(width, int) or width < 0: - raise TypeError("Part width must be a non-negative integer, not {!r}".format(width)) - if not isinstance(stride, int) or stride <= 0: - raise TypeError("Part stride must be a positive integer, not {!r}".format(stride)) - - super().__init__(src_loc_at=src_loc_at) - self.value = value - self.offset = Value.cast(offset) - self.width = width - self.stride = stride - - def shape(self): - return Shape(self.width) - - def _lhs_signals(self): - return self.value._lhs_signals() - - def _rhs_signals(self): - return self.value._rhs_signals() | self.offset._rhs_signals() - - def __repr__(self): - 
return "(part {} {} {} {})".format(repr(self.value), repr(self.offset), - self.width, self.stride) - - -@final -class Cat(Value): - """Concatenate values. - - Form a compound ``Value`` from several smaller ones by concatenation. - The first argument occupies the lower bits of the result. - The return value can be used on either side of an assignment, that - is, the concatenated value can be used as an argument on the RHS or - as a target on the LHS. If it is used on the LHS, it must solely - consist of ``Signal`` s, slices of ``Signal`` s, and other concatenations - meeting these properties. The bit length of the return value is the sum of - the bit lengths of the arguments:: - - len(Cat(args)) == sum(len(arg) for arg in args) - - Parameters - ---------- - *args : Values or iterables of Values, inout - ``Value`` s to be concatenated. - - Returns - ------- - Value, inout - Resulting ``Value`` obtained by concatentation. - """ - def __init__(self, *args, src_loc_at=0): - super().__init__(src_loc_at=src_loc_at) - self.parts = [Value.cast(v) for v in flatten(args)] - - def shape(self): - return Shape(sum(len(part) for part in self.parts)) - - def _lhs_signals(self): - return union((part._lhs_signals() for part in self.parts), start=SignalSet()) - - def _rhs_signals(self): - return union((part._rhs_signals() for part in self.parts), start=SignalSet()) - - def _as_const(self): - value = 0 - for part in reversed(self.parts): - value <<= len(part) - value |= part._as_const() - return value - - def __repr__(self): - return "(cat {})".format(" ".join(map(repr, self.parts))) - - -@final -class Repl(Value): - """Replicate a value - - An input value is replicated (repeated) several times - to be used on the RHS of assignments:: - - len(Repl(s, n)) == len(s) * n - - Parameters - ---------- - value : Value, in - Input value to be replicated. - count : int - Number of replications. - - Returns - ------- - Repl, out - Replicated value. 
- """ - def __init__(self, value, count, *, src_loc_at=0): - if not isinstance(count, int) or count < 0: - raise TypeError("Replication count must be a non-negative integer, not {!r}" - .format(count)) - - super().__init__(src_loc_at=src_loc_at) - self.value = Value.cast(value) - self.count = count - - def shape(self): - return Shape(len(self.value) * self.count) - - def _rhs_signals(self): - return self.value._rhs_signals() - - def __repr__(self): - return "(repl {!r} {})".format(self.value, self.count) - - -# @final -class Signal(Value, DUID): - """A varying integer value. - - Parameters - ---------- - shape : ``Shape``-castable object or None - Specification for the number of bits in this ``Signal`` and its signedness (whether it - can represent negative values). See ``Shape.cast`` for details. - If not specified, ``shape`` defaults to 1-bit and non-signed. - name : str - Name hint for this signal. If ``None`` (default) the name is inferred from the variable - name this ``Signal`` is assigned to. - reset : int or integral Enum - Reset (synchronous) or default (combinatorial) value. - When this ``Signal`` is assigned to in synchronous context and the corresponding clock - domain is reset, the ``Signal`` assumes the given value. When this ``Signal`` is unassigned - in combinatorial context (due to conditional assignments not being taken), the ``Signal`` - assumes its ``reset`` value. Defaults to 0. - reset_less : bool - If ``True``, do not generate reset logic for this ``Signal`` in synchronous statements. - The ``reset`` value is only used as a combinatorial default or as the initial value. - Defaults to ``False``. - attrs : dict - Dictionary of synthesis attributes. - decoder : function or Enum - A function converting integer signal values to human-readable strings (e.g. FSM state - names). 
If an ``Enum`` subclass is passed, it is concisely decoded using format string - ``"{0.name:}/{0.value:}"``, or a number if the signal value is not a member of - the enumeration. - - Attributes - ---------- - width : int - signed : bool - name : str - reset : int - reset_less : bool - attrs : dict - decoder : function - """ - - def __init__(self, shape=None, *, name=None, reset=0, reset_less=False, - attrs=None, decoder=None, src_loc_at=0): - super().__init__(src_loc_at=src_loc_at) - - if name is not None and not isinstance(name, str): - raise TypeError("Name must be a string, not {!r}".format(name)) - self.name = name or tracer.get_var_name(depth=2 + src_loc_at, default="$signal") - - if shape is None: - shape = unsigned(1) - self.width, self.signed = Shape.cast(shape, src_loc_at=1 + src_loc_at) - - if isinstance(reset, Enum): - reset = reset.value - if not isinstance(reset, int): - raise TypeError("Reset value has to be an int or an integral Enum") - - reset_width = bits_for(reset, self.signed) - if reset != 0 and reset_width > self.width: - warnings.warn("Reset value {!r} requires {} bits to represent, but the signal " - "only has {} bits" - .format(reset, reset_width, self.width), - SyntaxWarning, stacklevel=2 + src_loc_at) - - self.reset = reset - self.reset_less = bool(reset_less) - - self.attrs = OrderedDict(() if attrs is None else attrs) - - if decoder is None and isinstance(shape, type) and issubclass(shape, Enum): - decoder = shape - if isinstance(decoder, type) and issubclass(decoder, Enum): - def enum_decoder(value): - try: - return "{0.name:}/{0.value:}".format(decoder(value)) - except ValueError: - return str(value) - self.decoder = enum_decoder - self._enum_class = decoder - else: - self.decoder = decoder - self._enum_class = None - - # Not a @classmethod because nmigen.compat requires it. - @staticmethod - def like(other, *, name=None, name_suffix=None, src_loc_at=0, **kwargs): - """Create Signal based on another. 
- - Parameters - ---------- - other : Value - Object to base this Signal on. - """ - if name is not None: - new_name = str(name) - elif name_suffix is not None: - new_name = other.name + str(name_suffix) - else: - new_name = tracer.get_var_name(depth=2 + src_loc_at, default="$like") - kw = dict(shape=Value.cast(other).shape(), name=new_name) - if isinstance(other, Signal): - kw.update(reset=other.reset, reset_less=other.reset_less, - attrs=other.attrs, decoder=other.decoder) - kw.update(kwargs) - return Signal(**kw, src_loc_at=1 + src_loc_at) - - def shape(self): - return Shape(self.width, self.signed) - - def _lhs_signals(self): - return SignalSet((self,)) - - def _rhs_signals(self): - return SignalSet((self,)) - - def __repr__(self): - return "(sig {})".format(self.name) - - -@final -class ClockSignal(Value): - """Clock signal for a clock domain. - - Any ``ClockSignal`` is equivalent to ``cd.clk`` for a clock domain with the corresponding name. - All of these signals ultimately refer to the same signal, but they can be manipulated - independently of the clock domain, even before the clock domain is created. - - Parameters - ---------- - domain : str - Clock domain to obtain a clock signal for. Defaults to ``"sync"``. - """ - def __init__(self, domain="sync", *, src_loc_at=0): - super().__init__(src_loc_at=src_loc_at) - if not isinstance(domain, str): - raise TypeError("Clock domain name must be a string, not {!r}".format(domain)) - if domain == "comb": - raise ValueError("Domain '{}' does not have a clock".format(domain)) - self.domain = domain - - def shape(self): - return Shape(1) - - def _lhs_signals(self): - return SignalSet((self,)) - - def _rhs_signals(self): - raise NotImplementedError("ClockSignal must be lowered to a concrete signal") # :nocov: - - def __repr__(self): - return "(clk {})".format(self.domain) - - -@final -class ResetSignal(Value): - """Reset signal for a clock domain. 
- - Any ``ResetSignal`` is equivalent to ``cd.rst`` for a clock domain with the corresponding name. - All of these signals ultimately refer to the same signal, but they can be manipulated - independently of the clock domain, even before the clock domain is created. - - Parameters - ---------- - domain : str - Clock domain to obtain a reset signal for. Defaults to ``"sync"``. - allow_reset_less : bool - If the clock domain is reset-less, act as a constant ``0`` instead of reporting an error. - """ - def __init__(self, domain="sync", allow_reset_less=False, *, src_loc_at=0): - super().__init__(src_loc_at=src_loc_at) - if not isinstance(domain, str): - raise TypeError("Clock domain name must be a string, not {!r}".format(domain)) - if domain == "comb": - raise ValueError("Domain '{}' does not have a reset".format(domain)) - self.domain = domain - self.allow_reset_less = allow_reset_less - - def shape(self): - return Shape(1) - - def _lhs_signals(self): - return SignalSet((self,)) - - def _rhs_signals(self): - raise NotImplementedError("ResetSignal must be lowered to a concrete signal") # :nocov: - - def __repr__(self): - return "(rst {})".format(self.domain) - - -class Array(MutableSequence): - """Addressable multiplexer. - - An array is similar to a ``list`` that can also be indexed by ``Value``s; indexing by an integer or a slice works the same as for Python lists, but indexing by a ``Value`` results - in a proxy. - - The array proxy can be used as an ordinary ``Value``, i.e. participate in calculations and - assignments, provided that all elements of the array are values. The array proxy also supports - attribute access and further indexing, each returning another array proxy; this means that - the results of indexing into arrays, arrays of records, and arrays of arrays can all - be used as first-class values. - - It is an error to change an array or any of its elements after an array proxy was created. - Changing the array directly will raise an exception. 
However, it is not possible to detect - the elements being modified; if an element's attribute or element is modified after the proxy - for it has been created, the proxy will refer to stale data. - - Examples - -------- - - Simple array:: - - gpios = Array(Signal() for _ in range(10)) - with m.If(bus.we): - m.d.sync += gpios[bus.addr].eq(bus.w_data) - with m.Else(): - m.d.sync += bus.r_data.eq(gpios[bus.addr]) - - Multidimensional array:: - - mult = Array(Array(x * y for y in range(10)) for x in range(10)) - a = Signal.range(10) - b = Signal.range(10) - r = Signal(8) - m.d.comb += r.eq(mult[a][b]) - - Array of records:: - - layout = [ - ("r_data", 16), - ("r_en", 1), - ] - buses = Array(Record(layout) for busno in range(4)) - master = Record(layout) - m.d.comb += [ - buses[sel].r_en.eq(master.r_en), - master.r_data.eq(buses[sel].r_data), - ] - """ - def __init__(self, iterable=()): - self._inner = list(iterable) - self._proxy_at = None - self._mutable = True - - def __getitem__(self, index): - if isinstance(index, Value): - if self._mutable: - self._proxy_at = tracer.get_src_loc() - self._mutable = False - return ArrayProxy(self, index) - else: - return self._inner[index] - - def __len__(self): - return len(self._inner) - - def _check_mutability(self): - if not self._mutable: - raise ValueError("Array can no longer be mutated after it was indexed with a value " - "at {}:{}".format(*self._proxy_at)) - - def __setitem__(self, index, value): - self._check_mutability() - self._inner[index] = value - - def __delitem__(self, index): - self._check_mutability() - del self._inner[index] - - def insert(self, index, value): - self._check_mutability() - self._inner.insert(index, value) - - def __repr__(self): - return "(array{} [{}])".format(" mutable" if self._mutable else "", - ", ".join(map(repr, self._inner))) - - -@final -class ArrayProxy(Value): - def __init__(self, elems, index, *, src_loc_at=0): - super().__init__(src_loc_at=1 + src_loc_at) - self.elems = elems - 
self.index = Value.cast(index) - - def __getattr__(self, attr): - return ArrayProxy([getattr(elem, attr) for elem in self.elems], self.index) - - def __getitem__(self, index): - return ArrayProxy([ elem[index] for elem in self.elems], self.index) - - def _iter_as_values(self): - return (Value.cast(elem) for elem in self.elems) - - def shape(self): - unsigned_width = signed_width = 0 - has_unsigned = has_signed = False - for elem_width, elem_signed in (elem.shape() for elem in self._iter_as_values()): - if elem_signed: - has_signed = True - signed_width = max(signed_width, elem_width) - else: - has_unsigned = True - unsigned_width = max(unsigned_width, elem_width) - # The shape of the proxy must be such that it preserves the mathematical value of the array - # elements. I.e., shape-wise, an array proxy must be identical to an equivalent mux tree. - # To ensure this holds, if the array contains both signed and unsigned values, make sure - # that every unsigned value is zero-extended by at least one bit. - if has_signed and has_unsigned and unsigned_width >= signed_width: - # Array contains both signed and unsigned values, and at least one of the unsigned - # values won't be zero-extended otherwise. - return signed(unsigned_width + 1) - else: - # Array contains values of the same signedness, or else all of the unsigned values - # are zero-extended. - return Shape(max(unsigned_width, signed_width), has_signed) - - def _lhs_signals(self): - signals = union((elem._lhs_signals() for elem in self._iter_as_values()), - start=SignalSet()) - return signals - - def _rhs_signals(self): - signals = union((elem._rhs_signals() for elem in self._iter_as_values()), - start=SignalSet()) - return self.index._rhs_signals() | signals - - def __repr__(self): - return "(proxy (array [{}]) {!r})".format(", ".join(map(repr, self.elems)), self.index) - - -# TODO(nmigen-0.4): remove -class UserValue(Value): - """Value with custom lowering. 
- - A ``UserValue`` is a value whose precise representation does not have to be immediately known, - which is useful in certain metaprogramming scenarios. Instead of providing fixed semantics - upfront, it is kept abstract for as long as possible, only being lowered to a concrete nMigen - value when required. - - Note that the ``lower`` method will only be called once; this is necessary to ensure that - nMigen's view of representation of all values stays internally consistent. If the class - deriving from ``UserValue`` is mutable, then it must ensure that after ``lower`` is called, - it is not mutated in a way that changes its representation. - - The following is an incomplete list of actions that, when applied to an ``UserValue`` directly - or indirectly, will cause it to be lowered, provided as an illustrative reference: - * Querying the shape using ``.shape()`` or ``len()``; - * Creating a similarly shaped signal using ``Signal.like``; - * Indexing or iterating through individual bits; - * Adding an assignment to the value to a ``Module`` using ``m.d. +=``. - """ - @deprecated("instead of `UserValue`, use `ValueCastable`", stacklevel=3) - def __init__(self, *, src_loc_at=0): - super().__init__(src_loc_at=1 + src_loc_at) - self.__lowered = None - - @abstractmethod - def lower(self): - """Conversion to a concrete representation.""" - pass # :nocov: - - def _lazy_lower(self): - if self.__lowered is None: - lowered = self.lower() - if isinstance(lowered, UserValue): - lowered = lowered._lazy_lower() - self.__lowered = Value.cast(lowered) - return self.__lowered - - def shape(self): - return self._lazy_lower().shape() - - def _lhs_signals(self): - return self._lazy_lower()._lhs_signals() - - def _rhs_signals(self): - return self._lazy_lower()._rhs_signals() - - -class ValueCastable: - """Base class for classes which can be cast to Values. - - A ``ValueCastable`` can be cast to ``Value``, meaning its precise representation does not have - to be immediately known. 
This is useful in certain metaprogramming scenarios. Instead of - providing fixed semantics upfront, it is kept abstract for as long as possible, only being - cast to a concrete nMigen value when required. - - Note that it is necessary to ensure that nMigen's view of representation of all values stays - internally consistent. The class deriving from ``ValueCastable`` must decorate the ``as_value`` - method with the ``lowermethod`` decorator, which ensures that all calls to ``as_value`` return - the same ``Value`` representation. If the class deriving from ``ValueCastable`` is mutable, - it is up to the user to ensure that it is not mutated in a way that changes its representation - after the first call to ``as_value``. - """ - def __new__(cls, *args, **kwargs): - self = super().__new__(cls) - if not hasattr(self, "as_value"): - raise TypeError(f"Class '{cls.__name__}' deriving from `ValueCastable` must override " - "the `as_value` method") - - if not hasattr(self.as_value, "_ValueCastable__memoized"): - raise TypeError(f"Class '{cls.__name__}' deriving from `ValueCastable` must decorate " - "the `as_value` method with the `ValueCastable.lowermethod` decorator") - return self - - @staticmethod - def lowermethod(func): - """Decorator to memoize lowering methods. - - Ensures the decorated method is called only once, with subsequent method calls returning - the object returned by the first first method call. - - This decorator is required to decorate the ``as_value`` method of ``ValueCastable`` - subclasses. This is to ensure that nMigen's view of representation of all values stays - internally consistent. - """ - @functools.wraps(func) - def wrapper_memoized(self, *args, **kwargs): - # Use `in self.__dict__` instead of `hasattr` to avoid interfering with custom - # `__getattr__` implementations. 
- if not "_ValueCastable__lowered_to" in self.__dict__: - self.__lowered_to = func(self, *args, **kwargs) - return self.__lowered_to - wrapper_memoized.__memoized = True - return wrapper_memoized - - -@final -class Sample(Value): - """Value from the past. - - A ``Sample`` of an expression is equal to the value of the expression ``clocks`` clock edges - of the ``domain`` clock back. If that moment is before the beginning of time, it is equal - to the value of the expression calculated as if each signal had its reset value. - """ - def __init__(self, expr, clocks, domain, *, src_loc_at=0): - super().__init__(src_loc_at=1 + src_loc_at) - self.value = Value.cast(expr) - self.clocks = int(clocks) - self.domain = domain - if not isinstance(self.value, (Const, Signal, ClockSignal, ResetSignal, Initial)): - raise TypeError("Sampled value must be a signal or a constant, not {!r}" - .format(self.value)) - if self.clocks < 0: - raise ValueError("Cannot sample a value {} cycles in the future" - .format(-self.clocks)) - if not (self.domain is None or isinstance(self.domain, str)): - raise TypeError("Domain name must be a string or None, not {!r}" - .format(self.domain)) - - def shape(self): - return self.value.shape() - - def _rhs_signals(self): - return SignalSet((self,)) - - def __repr__(self): - return "(sample {!r} @ {}[{}])".format( - self.value, "" if self.domain is None else self.domain, self.clocks) - - -def Past(expr, clocks=1, domain=None): - return Sample(expr, clocks, domain) - - -def Stable(expr, clocks=0, domain=None): - return Sample(expr, clocks + 1, domain) == Sample(expr, clocks, domain) - - -def Rose(expr, clocks=0, domain=None): - return ~Sample(expr, clocks + 1, domain) & Sample(expr, clocks, domain) - - -def Fell(expr, clocks=0, domain=None): - return Sample(expr, clocks + 1, domain) & ~Sample(expr, clocks, domain) - - -@final -class Initial(Value): - """Start indicator, for model checking. 
- - An ``Initial`` signal is ``1`` at the first cycle of model checking, and ``0`` at any other. - """ - def __init__(self, *, src_loc_at=0): - super().__init__(src_loc_at=src_loc_at) - - def shape(self): - return Shape(1) - - def _rhs_signals(self): - return SignalSet((self,)) - - def __repr__(self): - return "(initial)" - - -class _StatementList(list): - def __repr__(self): - return "({})".format(" ".join(map(repr, self))) - - -class Statement: - def __init__(self, *, src_loc_at=0): - self.src_loc = tracer.get_src_loc(1 + src_loc_at) - - @staticmethod - def cast(obj): - if isinstance(obj, Iterable): - return _StatementList(list(chain.from_iterable(map(Statement.cast, obj)))) - else: - if isinstance(obj, Statement): - return _StatementList([obj]) - else: - raise TypeError("Object {!r} is not an nMigen statement".format(obj)) - - -@final -class Assign(Statement): - def __init__(self, lhs, rhs, *, src_loc_at=0): - super().__init__(src_loc_at=src_loc_at) - self.lhs = Value.cast(lhs) - self.rhs = Value.cast(rhs) - - def _lhs_signals(self): - return self.lhs._lhs_signals() - - def _rhs_signals(self): - return self.lhs._rhs_signals() | self.rhs._rhs_signals() - - def __repr__(self): - return "(eq {!r} {!r})".format(self.lhs, self.rhs) - - -class UnusedProperty(UnusedMustUse): - pass - - -class Property(Statement, MustUse): - _MustUse__warning = UnusedProperty - - def __init__(self, test, *, _check=None, _en=None, src_loc_at=0): - super().__init__(src_loc_at=src_loc_at) - self.test = Value.cast(test) - self._check = _check - self._en = _en - if self._check is None: - self._check = Signal(reset_less=True, name="${}$check".format(self._kind)) - self._check.src_loc = self.src_loc - if _en is None: - self._en = Signal(reset_less=True, name="${}$en".format(self._kind)) - self._en.src_loc = self.src_loc - - def _lhs_signals(self): - return SignalSet((self._en, self._check)) - - def _rhs_signals(self): - return self.test._rhs_signals() - - def __repr__(self): - return "({} 
{!r})".format(self._kind, self.test) - - -@final -class Assert(Property): - _kind = "assert" - - -@final -class Assume(Property): - _kind = "assume" - - -@final -class Cover(Property): - _kind = "cover" - - -# @final -class Switch(Statement): - def __init__(self, test, cases, *, src_loc=None, src_loc_at=0, case_src_locs={}): - if src_loc is None: - super().__init__(src_loc_at=src_loc_at) - else: - # Switch is a bit special in terms of location tracking because it is usually created - # long after the control has left the statement that directly caused its creation. - self.src_loc = src_loc - # Switch is also a bit special in that its parts also have location information. It can't - # be automatically traced, so whatever constructs a Switch may optionally provide it. - self.case_src_locs = {} - - self.test = Value.cast(test) - self.cases = OrderedDict() - for orig_keys, stmts in cases.items(): - # Map: None -> (); key -> (key,); (key...) -> (key...) - keys = orig_keys - if keys is None: - keys = () - if not isinstance(keys, tuple): - keys = (keys,) - # Map: 2 -> "0010"; "0010" -> "0010" - new_keys = () - key_mask = (1 << len(self.test)) - 1 - for key in keys: - if isinstance(key, str): - key = "".join(key.split()) # remove whitespace - elif isinstance(key, int): - key = format(key & key_mask, "b").rjust(len(self.test), "0") - elif isinstance(key, Enum): - key = format(key.value & key_mask, "b").rjust(len(self.test), "0") - else: - raise TypeError("Object {!r} cannot be used as a switch key" - .format(key)) - assert len(key) == len(self.test) - new_keys = (*new_keys, key) - if not isinstance(stmts, Iterable): - stmts = [stmts] - self.cases[new_keys] = Statement.cast(stmts) - if orig_keys in case_src_locs: - self.case_src_locs[new_keys] = case_src_locs[orig_keys] - - def _lhs_signals(self): - signals = union((s._lhs_signals() for ss in self.cases.values() for s in ss), - start=SignalSet()) - return signals - - def _rhs_signals(self): - signals = 
union((s._rhs_signals() for ss in self.cases.values() for s in ss), - start=SignalSet()) - return self.test._rhs_signals() | signals - - def __repr__(self): - def case_repr(keys, stmts): - stmts_repr = " ".join(map(repr, stmts)) - if keys == (): - return "(default {})".format(stmts_repr) - elif len(keys) == 1: - return "(case {} {})".format(keys[0], stmts_repr) - else: - return "(case ({}) {})".format(" ".join(keys), stmts_repr) - case_reprs = [case_repr(keys, stmts) for keys, stmts in self.cases.items()] - return "(switch {!r} {})".format(self.test, " ".join(case_reprs)) - - -class _MappedKeyCollection(metaclass=ABCMeta): - @abstractmethod - def _map_key(self, key): - pass # :nocov: - - @abstractmethod - def _unmap_key(self, key): - pass # :nocov: - - -class _MappedKeyDict(MutableMapping, _MappedKeyCollection): - def __init__(self, pairs=()): - self._storage = OrderedDict() - for key, value in pairs: - self[key] = value - - def __getitem__(self, key): - key = None if key is None else self._map_key(key) - return self._storage[key] - - def __setitem__(self, key, value): - key = None if key is None else self._map_key(key) - self._storage[key] = value - - def __delitem__(self, key): - key = None if key is None else self._map_key(key) - del self._storage[key] - - def __iter__(self): - for key in self._storage: - if key is None: - yield None - else: - yield self._unmap_key(key) - - def __eq__(self, other): - if not isinstance(other, type(self)): - return False - if len(self) != len(other): - return False - for ak, bk in zip(sorted(self._storage), sorted(other._storage)): - if ak != bk: - return False - if self._storage[ak] != other._storage[bk]: - return False - return True - - def __len__(self): - return len(self._storage) - - def __repr__(self): - pairs = ["({!r}, {!r})".format(k, v) for k, v in self.items()] - return "{}.{}([{}])".format(type(self).__module__, type(self).__name__, - ", ".join(pairs)) - - -class _MappedKeySet(MutableSet, _MappedKeyCollection): - def 
__init__(self, elements=()): - self._storage = OrderedDict() - for elem in elements: - self.add(elem) - - def add(self, value): - self._storage[self._map_key(value)] = None - - def update(self, values): - for value in values: - self.add(value) - - def discard(self, value): - if value in self: - del self._storage[self._map_key(value)] - - def __contains__(self, value): - return self._map_key(value) in self._storage - - def __iter__(self): - for key in [k for k in self._storage]: - yield self._unmap_key(key) - - def __len__(self): - return len(self._storage) - - def __repr__(self): - return "{}.{}({})".format(type(self).__module__, type(self).__name__, - ", ".join(repr(x) for x in self)) - - -class ValueKey: - def __init__(self, value): - self.value = Value.cast(value) - if isinstance(self.value, Const): - self._hash = hash(self.value.value) - elif isinstance(self.value, (Signal, AnyValue)): - self._hash = hash(self.value.duid) - elif isinstance(self.value, (ClockSignal, ResetSignal)): - self._hash = hash(self.value.domain) - elif isinstance(self.value, Operator): - self._hash = hash((self.value.operator, - tuple(ValueKey(o) for o in self.value.operands))) - elif isinstance(self.value, Slice): - self._hash = hash((ValueKey(self.value.value), self.value.start, self.value.stop)) - elif isinstance(self.value, Part): - self._hash = hash((ValueKey(self.value.value), ValueKey(self.value.offset), - self.value.width, self.value.stride)) - elif isinstance(self.value, Cat): - self._hash = hash(tuple(ValueKey(o) for o in self.value.parts)) - elif isinstance(self.value, ArrayProxy): - self._hash = hash((ValueKey(self.value.index), - tuple(ValueKey(e) for e in self.value._iter_as_values()))) - elif isinstance(self.value, Sample): - self._hash = hash((ValueKey(self.value.value), self.value.clocks, self.value.domain)) - elif isinstance(self.value, Initial): - self._hash = 0 - else: # :nocov: - raise TypeError("Object {!r} cannot be used as a key in value collections" - 
.format(self.value)) - - def __hash__(self): - return self._hash - - def __eq__(self, other): - if type(other) is not ValueKey: - return False - if type(self.value) is not type(other.value): - return False - - if isinstance(self.value, Const): - return self.value.value == other.value.value - elif isinstance(self.value, (Signal, AnyValue)): - return self.value is other.value - elif isinstance(self.value, (ClockSignal, ResetSignal)): - return self.value.domain == other.value.domain - elif isinstance(self.value, Operator): - return (self.value.operator == other.value.operator and - len(self.value.operands) == len(other.value.operands) and - all(ValueKey(a) == ValueKey(b) - for a, b in zip(self.value.operands, other.value.operands))) - elif isinstance(self.value, Slice): - return (ValueKey(self.value.value) == ValueKey(other.value.value) and - self.value.start == other.value.start and - self.value.stop == other.value.stop) - elif isinstance(self.value, Part): - return (ValueKey(self.value.value) == ValueKey(other.value.value) and - ValueKey(self.value.offset) == ValueKey(other.value.offset) and - self.value.width == other.value.width and - self.value.stride == other.value.stride) - elif isinstance(self.value, Cat): - return all(ValueKey(a) == ValueKey(b) - for a, b in zip(self.value.parts, other.value.parts)) - elif isinstance(self.value, ArrayProxy): - return (ValueKey(self.value.index) == ValueKey(other.value.index) and - len(self.value.elems) == len(other.value.elems) and - all(ValueKey(a) == ValueKey(b) - for a, b in zip(self.value._iter_as_values(), - other.value._iter_as_values()))) - elif isinstance(self.value, Sample): - return (ValueKey(self.value.value) == ValueKey(other.value.value) and - self.value.clocks == other.value.clocks and - self.value.domain == self.value.domain) - elif isinstance(self.value, Initial): - return True - else: # :nocov: - raise TypeError("Object {!r} cannot be used as a key in value collections" - .format(self.value)) - - def 
__lt__(self, other): - if not isinstance(other, ValueKey): - return False - if type(self.value) != type(other.value): - return False - - if isinstance(self.value, Const): - return self.value < other.value - elif isinstance(self.value, (Signal, AnyValue)): - return self.value.duid < other.value.duid - elif isinstance(self.value, Slice): - return (ValueKey(self.value.value) < ValueKey(other.value.value) and - self.value.start < other.value.start and - self.value.end < other.value.end) - else: # :nocov: - raise TypeError("Object {!r} cannot be used as a key in value collections") - - def __repr__(self): - return "<{}.ValueKey {!r}>".format(__name__, self.value) - - -class ValueDict(_MappedKeyDict): - _map_key = ValueKey - _unmap_key = lambda self, key: key.value - - -class ValueSet(_MappedKeySet): - _map_key = ValueKey - _unmap_key = lambda self, key: key.value - - -class SignalKey: - def __init__(self, signal): - self.signal = signal - if isinstance(signal, Signal): - self._intern = (0, signal.duid) - elif type(signal) is ClockSignal: - self._intern = (1, signal.domain) - elif type(signal) is ResetSignal: - self._intern = (2, signal.domain) - else: - raise TypeError("Object {!r} is not an nMigen signal".format(signal)) - - def __hash__(self): - return hash(self._intern) - - def __eq__(self, other): - if type(other) is not SignalKey: - return False - return self._intern == other._intern - - def __lt__(self, other): - if type(other) is not SignalKey: - raise TypeError("Object {!r} cannot be compared to a SignalKey".format(signal)) - return self._intern < other._intern - - def __repr__(self): - return "<{}.SignalKey {!r}>".format(__name__, self.signal) - - -class SignalDict(_MappedKeyDict): - _map_key = SignalKey - _unmap_key = lambda self, key: key.signal - - -class SignalSet(_MappedKeySet): - _map_key = SignalKey - _unmap_key = lambda self, key: key.signal +warnings.warn("instead of nmigen.hdl.ast, use amaranth.hdl.ast", + DeprecationWarning, stacklevel=2) diff --git 
a/nmigen/hdl/cd.py b/nmigen/hdl/cd.py index 96af20c..8e8f265 100644 --- a/nmigen/hdl/cd.py +++ b/nmigen/hdl/cd.py @@ -1,84 +1,7 @@ -from .. import tracer -from .ast import Signal +from amaranth.hdl.cd import * +from amaranth.hdl.cd import __all__ -__all__ = ["ClockDomain", "DomainError"] - - -class DomainError(Exception): - pass - - -class ClockDomain: - """Synchronous domain. - - Parameters - ---------- - name : str or None - Domain name. If ``None`` (the default) the name is inferred from the variable name this - ``ClockDomain`` is assigned to (stripping any `"cd_"` prefix). - reset_less : bool - If ``True``, the domain does not use a reset signal. Registers within this domain are - still all initialized to their reset state once, e.g. through Verilog `"initial"` - statements. - clk_edge : str - The edge of the clock signal on which signals are sampled. Must be one of "pos" or "neg". - async_reset : bool - If ``True``, the domain uses an asynchronous reset, and registers within this domain - are initialized to their reset state when reset level changes. Otherwise, registers - are initialized to reset state at the next clock cycle when reset is asserted. - local : bool - If ``True``, the domain will propagate only downwards in the design hierarchy. Otherwise, - the domain will propagate everywhere. - - Attributes - ---------- - clk : Signal, inout - The clock for this domain. Can be driven or used to drive other signals (preferably - in combinatorial context). - rst : Signal or None, inout - Reset signal for this domain. Can be driven or used to drive. 
- """ - - @staticmethod - def _name_for(domain_name, signal_name): - if domain_name == "sync": - return signal_name - else: - return "{}_{}".format(domain_name, signal_name) - - def __init__(self, name=None, *, clk_edge="pos", reset_less=False, async_reset=False, - local=False): - if name is None: - try: - name = tracer.get_var_name() - except tracer.NameNotFound: - raise ValueError("Clock domain name must be specified explicitly") - if name.startswith("cd_"): - name = name[3:] - if name == "comb": - raise ValueError("Domain '{}' may not be clocked".format(name)) - - if clk_edge not in ("pos", "neg"): - raise ValueError("Domain clock edge must be one of 'pos' or 'neg', not {!r}" - .format(clk_edge)) - - self.name = name - - self.clk = Signal(name=self._name_for(name, "clk"), src_loc_at=1) - self.clk_edge = clk_edge - - if reset_less: - self.rst = None - else: - self.rst = Signal(name=self._name_for(name, "rst"), src_loc_at=1) - - self.async_reset = async_reset - - self.local = local - - def rename(self, new_name): - self.name = new_name - self.clk.name = self._name_for(new_name, "clk") - if self.rst is not None: - self.rst.name = self._name_for(new_name, "rst") +import warnings +warnings.warn("instead of nmigen.hdl.cd, use amaranth.hdl.cd", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/hdl/dsl.py b/nmigen/hdl/dsl.py index 279e7c5..b96627b 100644 --- a/nmigen/hdl/dsl.py +++ b/nmigen/hdl/dsl.py @@ -1,546 +1,7 @@ -from collections import OrderedDict -from contextlib import contextmanager, _GeneratorContextManager -from functools import wraps -from enum import Enum +from amaranth.hdl.dsl import * +from amaranth.hdl.dsl import __all__ + + import warnings - -from .._utils import flatten, bits_for -from .. 
import tracer -from .ast import * -from .ir import * -from .cd import * -from .xfrm import * - - -__all__ = ["SyntaxError", "SyntaxWarning", "Module"] - - -class SyntaxError(Exception): - pass - - -class SyntaxWarning(Warning): - pass - - -class _ModuleBuilderProxy: - def __init__(self, builder, depth): - object.__setattr__(self, "_builder", builder) - object.__setattr__(self, "_depth", depth) - - -class _ModuleBuilderDomain(_ModuleBuilderProxy): - def __init__(self, builder, depth, domain): - super().__init__(builder, depth) - self._domain = domain - - def __iadd__(self, assigns): - self._builder._add_statement(assigns, domain=self._domain, depth=self._depth) - return self - - -class _ModuleBuilderDomains(_ModuleBuilderProxy): - def __getattr__(self, name): - if name == "submodules": - warnings.warn("Using '.d.{}' would add statements to clock domain {!r}; " - "did you mean .{} instead?" - .format(name, name, name), - SyntaxWarning, stacklevel=2) - if name == "comb": - domain = None - else: - domain = name - return _ModuleBuilderDomain(self._builder, self._depth, domain) - - def __getitem__(self, name): - return self.__getattr__(name) - - def __setattr__(self, name, value): - if name == "_depth": - object.__setattr__(self, name, value) - elif not isinstance(value, _ModuleBuilderDomain): - raise AttributeError("Cannot assign 'd.{}' attribute; did you mean 'd.{} +='?" - .format(name, name)) - - def __setitem__(self, name, value): - return self.__setattr__(name, value) - - -class _ModuleBuilderRoot: - def __init__(self, builder, depth): - self._builder = builder - self.domain = self.d = _ModuleBuilderDomains(builder, depth) - - def __getattr__(self, name): - if name in ("comb", "sync"): - raise AttributeError("'{}' object has no attribute '{}'; did you mean 'd.{}'?" 
- .format(type(self).__name__, name, name)) - raise AttributeError("'{}' object has no attribute '{}'" - .format(type(self).__name__, name)) - - -class _ModuleBuilderSubmodules: - def __init__(self, builder): - object.__setattr__(self, "_builder", builder) - - def __iadd__(self, modules): - for module in flatten([modules]): - self._builder._add_submodule(module) - return self - - def __setattr__(self, name, submodule): - self._builder._add_submodule(submodule, name) - - def __setitem__(self, name, value): - return self.__setattr__(name, value) - - def __getattr__(self, name): - return self._builder._get_submodule(name) - - def __getitem__(self, name): - return self.__getattr__(name) - - -class _ModuleBuilderDomainSet: - def __init__(self, builder): - object.__setattr__(self, "_builder", builder) - - def __iadd__(self, domains): - for domain in flatten([domains]): - if not isinstance(domain, ClockDomain): - raise TypeError("Only clock domains may be added to `m.domains`, not {!r}" - .format(domain)) - self._builder._add_domain(domain) - return self - - def __setattr__(self, name, domain): - if not isinstance(domain, ClockDomain): - raise TypeError("Only clock domains may be added to `m.domains`, not {!r}" - .format(domain)) - if domain.name != name: - raise NameError("Clock domain name {!r} must match name in `m.domains.{} += ...` " - "syntax" - .format(domain.name, name)) - self._builder._add_domain(domain) - - -# It's not particularly clean to depend on an internal interface, but, unfortunately, __bool__ -# must be defined on a class to be called during implicit conversion. 
-class _GuardedContextManager(_GeneratorContextManager): - def __init__(self, keyword, func, args, kwds): - self.keyword = keyword - return super().__init__(func, args, kwds) - - def __bool__(self): - raise SyntaxError("`if m.{kw}(...):` does not work; use `with m.{kw}(...)`" - .format(kw=self.keyword)) - - -def _guardedcontextmanager(keyword): - def decorator(func): - @wraps(func) - def helper(*args, **kwds): - return _GuardedContextManager(keyword, func, args, kwds) - return helper - return decorator - - -class FSM: - def __init__(self, state, encoding, decoding): - self.state = state - self.encoding = encoding - self.decoding = decoding - - def ongoing(self, name): - if name not in self.encoding: - self.encoding[name] = len(self.encoding) - return Operator("==", [self.state, self.encoding[name]], src_loc_at=0) - - -class Module(_ModuleBuilderRoot, Elaboratable): - @classmethod - def __init_subclass__(cls): - raise SyntaxError("Instead of inheriting from `Module`, inherit from `Elaboratable` " - "and return a `Module` from the `elaborate(self, platform)` method") - - def __init__(self): - _ModuleBuilderRoot.__init__(self, self, depth=0) - self.submodules = _ModuleBuilderSubmodules(self) - self.domains = _ModuleBuilderDomainSet(self) - - self._statements = Statement.cast([]) - self._ctrl_context = None - self._ctrl_stack = [] - - self._driving = SignalDict() - self._named_submodules = {} - self._anon_submodules = [] - self._domains = {} - self._generated = {} - - def _check_context(self, construct, context): - if self._ctrl_context != context: - if self._ctrl_context is None: - raise SyntaxError("{} is not permitted outside of {}" - .format(construct, context)) - else: - if self._ctrl_context == "Switch": - secondary_context = "Case" - if self._ctrl_context == "FSM": - secondary_context = "State" - raise SyntaxError("{} is not permitted directly inside of {}; it is permitted " - "inside of {} {}" - .format(construct, self._ctrl_context, - self._ctrl_context, 
secondary_context)) - - def _get_ctrl(self, name): - if self._ctrl_stack: - top_name, top_data = self._ctrl_stack[-1] - if top_name == name: - return top_data - - def _flush_ctrl(self): - while len(self._ctrl_stack) > self.domain._depth: - self._pop_ctrl() - - def _set_ctrl(self, name, data): - self._flush_ctrl() - self._ctrl_stack.append((name, data)) - return data - - def _check_signed_cond(self, cond): - cond = Value.cast(cond) - width, signed = cond.shape() - if signed: - warnings.warn("Signed values in If/Elif conditions usually result from inverting " - "Python booleans with ~, which leads to unexpected results. " - "Replace `~flag` with `not flag`. (If this is a false positive, " - "silence this warning with `m.If(x)` → `m.If(x.bool())`.)", - SyntaxWarning, stacklevel=4) - return cond - - @_guardedcontextmanager("If") - def If(self, cond): - self._check_context("If", context=None) - cond = self._check_signed_cond(cond) - src_loc = tracer.get_src_loc(src_loc_at=1) - if_data = self._set_ctrl("If", { - "depth": self.domain._depth, - "tests": [], - "bodies": [], - "src_loc": src_loc, - "src_locs": [], - }) - try: - _outer_case, self._statements = self._statements, [] - self.domain._depth += 1 - yield - self._flush_ctrl() - if_data["tests"].append(cond) - if_data["bodies"].append(self._statements) - if_data["src_locs"].append(src_loc) - finally: - self.domain._depth -= 1 - self._statements = _outer_case - - @_guardedcontextmanager("Elif") - def Elif(self, cond): - self._check_context("Elif", context=None) - cond = self._check_signed_cond(cond) - src_loc = tracer.get_src_loc(src_loc_at=1) - if_data = self._get_ctrl("If") - if if_data is None or if_data["depth"] != self.domain._depth: - raise SyntaxError("Elif without preceding If") - try: - _outer_case, self._statements = self._statements, [] - self.domain._depth += 1 - yield - self._flush_ctrl() - if_data["tests"].append(cond) - if_data["bodies"].append(self._statements) - if_data["src_locs"].append(src_loc) - 
finally: - self.domain._depth -= 1 - self._statements = _outer_case - - @_guardedcontextmanager("Else") - def Else(self): - self._check_context("Else", context=None) - src_loc = tracer.get_src_loc(src_loc_at=1) - if_data = self._get_ctrl("If") - if if_data is None or if_data["depth"] != self.domain._depth: - raise SyntaxError("Else without preceding If/Elif") - try: - _outer_case, self._statements = self._statements, [] - self.domain._depth += 1 - yield - self._flush_ctrl() - if_data["bodies"].append(self._statements) - if_data["src_locs"].append(src_loc) - finally: - self.domain._depth -= 1 - self._statements = _outer_case - self._pop_ctrl() - - @contextmanager - def Switch(self, test): - self._check_context("Switch", context=None) - switch_data = self._set_ctrl("Switch", { - "test": Value.cast(test), - "cases": OrderedDict(), - "src_loc": tracer.get_src_loc(src_loc_at=1), - "case_src_locs": {}, - }) - try: - self._ctrl_context = "Switch" - self.domain._depth += 1 - yield - finally: - self.domain._depth -= 1 - self._ctrl_context = None - self._pop_ctrl() - - @contextmanager - def Case(self, *patterns): - self._check_context("Case", context="Switch") - src_loc = tracer.get_src_loc(src_loc_at=1) - switch_data = self._get_ctrl("Switch") - new_patterns = () - for pattern in patterns: - if not isinstance(pattern, (int, str, Enum)): - raise SyntaxError("Case pattern must be an integer, a string, or an enumeration, " - "not {!r}" - .format(pattern)) - if isinstance(pattern, str) and any(bit not in "01- \t" for bit in pattern): - raise SyntaxError("Case pattern '{}' must consist of 0, 1, and - (don't care) " - "bits, and may include whitespace" - .format(pattern)) - if (isinstance(pattern, str) and - len("".join(pattern.split())) != len(switch_data["test"])): - raise SyntaxError("Case pattern '{}' must have the same width as switch value " - "(which is {})" - .format(pattern, len(switch_data["test"]))) - if isinstance(pattern, int) and bits_for(pattern) > 
len(switch_data["test"]): - warnings.warn("Case pattern '{:b}' is wider than switch value " - "(which has width {}); comparison will never be true" - .format(pattern, len(switch_data["test"])), - SyntaxWarning, stacklevel=3) - continue - if isinstance(pattern, Enum) and bits_for(pattern.value) > len(switch_data["test"]): - warnings.warn("Case pattern '{:b}' ({}.{}) is wider than switch value " - "(which has width {}); comparison will never be true" - .format(pattern.value, pattern.__class__.__name__, pattern.name, - len(switch_data["test"])), - SyntaxWarning, stacklevel=3) - continue - new_patterns = (*new_patterns, pattern) - try: - _outer_case, self._statements = self._statements, [] - self._ctrl_context = None - yield - self._flush_ctrl() - # If none of the provided cases can possibly be true, omit this branch completely. - # This needs to be differentiated from no cases being provided in the first place, - # which means the branch will always match. - if not (patterns and not new_patterns): - switch_data["cases"][new_patterns] = self._statements - switch_data["case_src_locs"][new_patterns] = src_loc - finally: - self._ctrl_context = "Switch" - self._statements = _outer_case - - def Default(self): - return self.Case() - - @contextmanager - def FSM(self, reset=None, domain="sync", name="fsm"): - self._check_context("FSM", context=None) - if domain == "comb": - raise ValueError("FSM may not be driven by the '{}' domain".format(domain)) - fsm_data = self._set_ctrl("FSM", { - "name": name, - "signal": Signal(name="{}_state".format(name), src_loc_at=2), - "reset": reset, - "domain": domain, - "encoding": OrderedDict(), - "decoding": OrderedDict(), - "states": OrderedDict(), - "src_loc": tracer.get_src_loc(src_loc_at=1), - "state_src_locs": {}, - }) - self._generated[name] = fsm = \ - FSM(fsm_data["signal"], fsm_data["encoding"], fsm_data["decoding"]) - try: - self._ctrl_context = "FSM" - self.domain._depth += 1 - yield fsm - for state_name in fsm_data["encoding"]: - 
if state_name not in fsm_data["states"]: - raise NameError("FSM state '{}' is referenced but not defined" - .format(state_name)) - finally: - self.domain._depth -= 1 - self._ctrl_context = None - self._pop_ctrl() - - @contextmanager - def State(self, name): - self._check_context("FSM State", context="FSM") - src_loc = tracer.get_src_loc(src_loc_at=1) - fsm_data = self._get_ctrl("FSM") - if name in fsm_data["states"]: - raise NameError("FSM state '{}' is already defined".format(name)) - if name not in fsm_data["encoding"]: - fsm_data["encoding"][name] = len(fsm_data["encoding"]) - try: - _outer_case, self._statements = self._statements, [] - self._ctrl_context = None - yield - self._flush_ctrl() - fsm_data["states"][name] = self._statements - fsm_data["state_src_locs"][name] = src_loc - finally: - self._ctrl_context = "FSM" - self._statements = _outer_case - - @property - def next(self): - raise SyntaxError("Only assignment to `m.next` is permitted") - - @next.setter - def next(self, name): - if self._ctrl_context != "FSM": - for level, (ctrl_name, ctrl_data) in enumerate(reversed(self._ctrl_stack)): - if ctrl_name == "FSM": - if name not in ctrl_data["encoding"]: - ctrl_data["encoding"][name] = len(ctrl_data["encoding"]) - self._add_statement( - assigns=[ctrl_data["signal"].eq(ctrl_data["encoding"][name])], - domain=ctrl_data["domain"], - depth=len(self._ctrl_stack)) - return - - raise SyntaxError("`m.next = <...>` is only permitted inside an FSM state") - - def _pop_ctrl(self): - name, data = self._ctrl_stack.pop() - src_loc = data["src_loc"] - - if name == "If": - if_tests, if_bodies = data["tests"], data["bodies"] - if_src_locs = data["src_locs"] - - tests, cases = [], OrderedDict() - for if_test, if_case in zip(if_tests + [None], if_bodies): - if if_test is not None: - if len(if_test) != 1: - if_test = if_test.bool() - tests.append(if_test) - - if if_test is not None: - match = ("1" + "-" * (len(tests) - 1)).rjust(len(if_tests), "-") - else: - match = None - 
cases[match] = if_case - - self._statements.append(Switch(Cat(tests), cases, - src_loc=src_loc, case_src_locs=dict(zip(cases, if_src_locs)))) - - if name == "Switch": - switch_test, switch_cases = data["test"], data["cases"] - switch_case_src_locs = data["case_src_locs"] - - self._statements.append(Switch(switch_test, switch_cases, - src_loc=src_loc, case_src_locs=switch_case_src_locs)) - - if name == "FSM": - fsm_signal, fsm_reset, fsm_encoding, fsm_decoding, fsm_states = \ - data["signal"], data["reset"], data["encoding"], data["decoding"], data["states"] - fsm_state_src_locs = data["state_src_locs"] - if not fsm_states: - return - fsm_signal.width = bits_for(len(fsm_encoding) - 1) - if fsm_reset is None: - fsm_signal.reset = fsm_encoding[next(iter(fsm_states))] - else: - fsm_signal.reset = fsm_encoding[fsm_reset] - # The FSM is encoded such that the state with encoding 0 is always the reset state. - fsm_decoding.update((n, s) for s, n in fsm_encoding.items()) - fsm_signal.decoder = lambda n: "{}/{}".format(fsm_decoding[n], n) - self._statements.append(Switch(fsm_signal, - OrderedDict((fsm_encoding[name], stmts) for name, stmts in fsm_states.items()), - src_loc=src_loc, case_src_locs={fsm_encoding[name]: fsm_state_src_locs[name] - for name in fsm_states})) - - def _add_statement(self, assigns, domain, depth, compat_mode=False): - def domain_name(domain): - if domain is None: - return "comb" - else: - return domain - - while len(self._ctrl_stack) > self.domain._depth: - self._pop_ctrl() - - for stmt in Statement.cast(assigns): - if not compat_mode and not isinstance(stmt, (Assign, Assert, Assume, Cover)): - raise SyntaxError( - "Only assignments and property checks may be appended to d.{}" - .format(domain_name(domain))) - - stmt._MustUse__used = True - stmt = SampleDomainInjector(domain)(stmt) - - for signal in stmt._lhs_signals(): - if signal not in self._driving: - self._driving[signal] = domain - elif self._driving[signal] != domain: - cd_curr = 
self._driving[signal] - raise SyntaxError( - "Driver-driver conflict: trying to drive {!r} from d.{}, but it is " - "already driven from d.{}" - .format(signal, domain_name(domain), domain_name(cd_curr))) - - self._statements.append(stmt) - - def _add_submodule(self, submodule, name=None): - if not hasattr(submodule, "elaborate"): - raise TypeError("Trying to add {!r}, which does not implement .elaborate(), as " - "a submodule".format(submodule)) - if name == None: - self._anon_submodules.append(submodule) - else: - if name in self._named_submodules: - raise NameError("Submodule named '{}' already exists".format(name)) - self._named_submodules[name] = submodule - - def _get_submodule(self, name): - if name in self._named_submodules: - return self._named_submodules[name] - else: - raise AttributeError("No submodule named '{}' exists".format(name)) - - def _add_domain(self, cd): - if cd.name in self._domains: - raise NameError("Clock domain named '{}' already exists".format(cd.name)) - self._domains[cd.name] = cd - - def _flush(self): - while self._ctrl_stack: - self._pop_ctrl() - - def elaborate(self, platform): - self._flush() - - fragment = Fragment() - for name in self._named_submodules: - fragment.add_subfragment(Fragment.get(self._named_submodules[name], platform), name) - for submodule in self._anon_submodules: - fragment.add_subfragment(Fragment.get(submodule, platform), None) - statements = SampleDomainInjector("sync")(self._statements) - fragment.add_statements(statements) - for signal, domain in self._driving.items(): - fragment.add_driver(signal, domain) - fragment.add_domains(self._domains.values()) - fragment.generated.update(self._generated) - return fragment +warnings.warn("instead of nmigen.hdl.dsl, use amaranth.hdl.dsl", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/hdl/ir.py b/nmigen/hdl/ir.py index 3c1ec60..74a4833 100644 --- a/nmigen/hdl/ir.py +++ b/nmigen/hdl/ir.py @@ -1,592 +1,7 @@ -from abc import ABCMeta -from collections import 
defaultdict, OrderedDict -from functools import reduce +from amaranth.hdl.ir import * +from amaranth.hdl.ir import __all__ + + import warnings - -from .._utils import * -from .._unused import * -from .ast import * -from .cd import * - - -__all__ = ["UnusedElaboratable", "Elaboratable", "DriverConflict", "Fragment", "Instance"] - - -class UnusedElaboratable(UnusedMustUse): - pass - - -class Elaboratable(MustUse, metaclass=ABCMeta): - _MustUse__warning = UnusedElaboratable - - -class DriverConflict(UserWarning): - pass - - -class Fragment: - @staticmethod - def get(obj, platform): - code = None - while True: - if isinstance(obj, Fragment): - return obj - elif isinstance(obj, Elaboratable): - code = obj.elaborate.__code__ - obj._MustUse__used = True - obj = obj.elaborate(platform) - elif hasattr(obj, "elaborate"): - warnings.warn( - message="Class {!r} is an elaboratable that does not explicitly inherit from " - "Elaboratable; doing so would improve diagnostics" - .format(type(obj)), - category=RuntimeWarning, - stacklevel=2) - code = obj.elaborate.__code__ - obj = obj.elaborate(platform) - else: - raise AttributeError("Object {!r} cannot be elaborated".format(obj)) - if obj is None and code is not None: - warnings.warn_explicit( - message=".elaborate() returned None; missing return statement?", - category=UserWarning, - filename=code.co_filename, - lineno=code.co_firstlineno) - - def __init__(self): - self.ports = SignalDict() - self.drivers = OrderedDict() - self.statements = [] - self.domains = OrderedDict() - self.subfragments = [] - self.attrs = OrderedDict() - self.generated = OrderedDict() - self.flatten = False - - def add_ports(self, *ports, dir): - assert dir in ("i", "o", "io") - for port in flatten(ports): - self.ports[port] = dir - - def iter_ports(self, dir=None): - if dir is None: - yield from self.ports - else: - for port, port_dir in self.ports.items(): - if port_dir == dir: - yield port - - def add_driver(self, signal, domain=None): - if domain not 
in self.drivers: - self.drivers[domain] = SignalSet() - self.drivers[domain].add(signal) - - def iter_drivers(self): - for domain, signals in self.drivers.items(): - for signal in signals: - yield domain, signal - - def iter_comb(self): - if None in self.drivers: - yield from self.drivers[None] - - def iter_sync(self): - for domain, signals in self.drivers.items(): - if domain is None: - continue - for signal in signals: - yield domain, signal - - def iter_signals(self): - signals = SignalSet() - signals |= self.ports.keys() - for domain, domain_signals in self.drivers.items(): - if domain is not None: - cd = self.domains[domain] - signals.add(cd.clk) - if cd.rst is not None: - signals.add(cd.rst) - signals |= domain_signals - return signals - - def add_domains(self, *domains): - for domain in flatten(domains): - assert isinstance(domain, ClockDomain) - assert domain.name not in self.domains - self.domains[domain.name] = domain - - def iter_domains(self): - yield from self.domains - - def add_statements(self, *stmts): - for stmt in Statement.cast(stmts): - stmt._MustUse__used = True - self.statements.append(stmt) - - def add_subfragment(self, subfragment, name=None): - assert isinstance(subfragment, Fragment) - self.subfragments.append((subfragment, name)) - - def find_subfragment(self, name_or_index): - if isinstance(name_or_index, int): - if name_or_index < len(self.subfragments): - subfragment, name = self.subfragments[name_or_index] - return subfragment - raise NameError("No subfragment at index #{}".format(name_or_index)) - else: - for subfragment, name in self.subfragments: - if name == name_or_index: - return subfragment - raise NameError("No subfragment with name '{}'".format(name_or_index)) - - def find_generated(self, *path): - if len(path) > 1: - path_component, *path = path - return self.find_subfragment(path_component).find_generated(*path) - else: - item, = path - return self.generated[item] - - def elaborate(self, platform): - return self - - def 
_merge_subfragment(self, subfragment): - # Merge subfragment's everything except clock domains into this fragment. - # Flattening is done after clock domain propagation, so we can assume the domains - # are already the same in every involved fragment in the first place. - self.ports.update(subfragment.ports) - for domain, signal in subfragment.iter_drivers(): - self.add_driver(signal, domain) - self.statements += subfragment.statements - self.subfragments += subfragment.subfragments - - # Remove the merged subfragment. - found = False - for i, (check_subfrag, check_name) in enumerate(self.subfragments): # :nobr: - if subfragment == check_subfrag: - del self.subfragments[i] - found = True - break - assert found - - def _resolve_hierarchy_conflicts(self, hierarchy=("top",), mode="warn"): - assert mode in ("silent", "warn", "error") - - driver_subfrags = SignalDict() - memory_subfrags = OrderedDict() - def add_subfrag(registry, entity, entry): - # Because of missing domain insertion, at the point when this code runs, we have - # a mixture of bound and unbound {Clock,Reset}Signals. Map the bound ones to - # the actual signals (because the signal itself can be driven as well); but leave - # the unbound ones as it is, because there's no concrete signal for it yet anyway. - if isinstance(entity, ClockSignal) and entity.domain in self.domains: - entity = self.domains[entity.domain].clk - elif isinstance(entity, ResetSignal) and entity.domain in self.domains: - entity = self.domains[entity.domain].rst - - if entity not in registry: - registry[entity] = set() - registry[entity].add(entry) - - # For each signal driven by this fragment and/or its subfragments, determine which - # subfragments also drive it. 
- for domain, signal in self.iter_drivers(): - add_subfrag(driver_subfrags, signal, (None, hierarchy)) - - flatten_subfrags = set() - for i, (subfrag, name) in enumerate(self.subfragments): - if name is None: - name = "".format(i) - subfrag_hierarchy = hierarchy + (name,) - - if subfrag.flatten: - # Always flatten subfragments that explicitly request it. - flatten_subfrags.add((subfrag, subfrag_hierarchy)) - - if isinstance(subfrag, Instance): - # For memories (which are subfragments, but semantically a part of superfragment), - # record that this fragment is driving it. - if subfrag.type in ("$memrd", "$memwr"): - memory = subfrag.parameters["MEMID"] - add_subfrag(memory_subfrags, memory, (None, hierarchy)) - - # Never flatten instances. - continue - - # First, recurse into subfragments and let them detect driver conflicts as well. - subfrag_drivers, subfrag_memories = \ - subfrag._resolve_hierarchy_conflicts(subfrag_hierarchy, mode) - - # Second, classify subfragments by signals they drive and memories they use. - for signal in subfrag_drivers: - add_subfrag(driver_subfrags, signal, (subfrag, subfrag_hierarchy)) - for memory in subfrag_memories: - add_subfrag(memory_subfrags, memory, (subfrag, subfrag_hierarchy)) - - # Find out the set of subfragments that needs to be flattened into this fragment - # to resolve driver-driver conflicts. - def flatten_subfrags_if_needed(subfrags): - if len(subfrags) == 1: - return [] - flatten_subfrags.update((f, h) for f, h in subfrags if f is not None) - return list(sorted(".".join(h) for f, h in subfrags)) - - for signal, subfrags in driver_subfrags.items(): - subfrag_names = flatten_subfrags_if_needed(subfrags) - if not subfrag_names: - continue - - # While we're at it, show a message. 
- message = ("Signal '{}' is driven from multiple fragments: {}" - .format(signal, ", ".join(subfrag_names))) - if mode == "error": - raise DriverConflict(message) - elif mode == "warn": - message += "; hierarchy will be flattened" - warnings.warn_explicit(message, DriverConflict, *signal.src_loc) - - for memory, subfrags in memory_subfrags.items(): - subfrag_names = flatten_subfrags_if_needed(subfrags) - if not subfrag_names: - continue - - # While we're at it, show a message. - message = ("Memory '{}' is accessed from multiple fragments: {}" - .format(memory.name, ", ".join(subfrag_names))) - if mode == "error": - raise DriverConflict(message) - elif mode == "warn": - message += "; hierarchy will be flattened" - warnings.warn_explicit(message, DriverConflict, *memory.src_loc) - - # Flatten hierarchy. - for subfrag, subfrag_hierarchy in sorted(flatten_subfrags, key=lambda x: x[1]): - self._merge_subfragment(subfrag) - - # If we flattened anything, we might be in a situation where we have a driver conflict - # again, e.g. if we had a tree of fragments like A --- B --- C where only fragments - # A and C were driving a signal S. In that case, since B is not driving S itself, - # processing B will not result in any flattening, but since B is transitively driving S, - # processing A will flatten B into it. Afterwards, we have a tree like AB --- C, which - # has another conflict. - if any(flatten_subfrags): - # Try flattening again. - return self._resolve_hierarchy_conflicts(hierarchy, mode) - - # Nothing was flattened, we're done! - return (SignalSet(driver_subfrags.keys()), - set(memory_subfrags.keys())) - - def _propagate_domains_up(self, hierarchy=("top",)): - from .xfrm import DomainRenamer - - domain_subfrags = defaultdict(lambda: set()) - - # For each domain defined by a subfragment, determine which subfragments define it. - for i, (subfrag, name) in enumerate(self.subfragments): - # First, recurse into subfragments and let them propagate domains up as well. 
- hier_name = name - if hier_name is None: - hier_name = "".format(i) - subfrag._propagate_domains_up(hierarchy + (hier_name,)) - - # Second, classify subfragments by domains they define. - for domain_name, domain in subfrag.domains.items(): - if domain.local: - continue - domain_subfrags[domain_name].add((subfrag, name, i)) - - # For each domain defined by more than one subfragment, rename the domain in each - # of the subfragments such that they no longer conflict. - for domain_name, subfrags in domain_subfrags.items(): - if len(subfrags) == 1: - continue - - names = [n for f, n, i in subfrags] - if not all(names): - names = sorted("".format(i) if n is None else "'{}'".format(n) - for f, n, i in subfrags) - raise DomainError("Domain '{}' is defined by subfragments {} of fragment '{}'; " - "it is necessary to either rename subfragment domains " - "explicitly, or give names to subfragments" - .format(domain_name, ", ".join(names), ".".join(hierarchy))) - - if len(names) != len(set(names)): - names = sorted("#{}".format(i) for f, n, i in subfrags) - raise DomainError("Domain '{}' is defined by subfragments {} of fragment '{}', " - "some of which have identical names; it is necessary to either " - "rename subfragment domains explicitly, or give distinct names " - "to subfragments" - .format(domain_name, ", ".join(names), ".".join(hierarchy))) - - for subfrag, name, i in subfrags: - domain_name_map = {domain_name: "{}_{}".format(name, domain_name)} - self.subfragments[i] = (DomainRenamer(domain_name_map)(subfrag), name) - - # Finally, collect the (now unique) subfragment domains, and merge them into our domains. - for subfrag, name in self.subfragments: - for domain_name, domain in subfrag.domains.items(): - if domain.local: - continue - self.add_domains(domain) - - def _propagate_domains_down(self): - # For each domain defined in this fragment, ensure it also exists in all subfragments. 
- for subfrag, name in self.subfragments: - for domain in self.iter_domains(): - if domain in subfrag.domains: - assert self.domains[domain] is subfrag.domains[domain] - else: - subfrag.add_domains(self.domains[domain]) - - subfrag._propagate_domains_down() - - def _create_missing_domains(self, missing_domain, *, platform=None): - from .xfrm import DomainCollector - - collector = DomainCollector() - collector(self) - - new_domains = [] - for domain_name in collector.used_domains - collector.defined_domains: - if domain_name is None: - continue - value = missing_domain(domain_name) - if value is None: - raise DomainError("Domain '{}' is used but not defined".format(domain_name)) - if type(value) is ClockDomain: - self.add_domains(value) - # And expose ports on the newly added clock domain, since it is added directly - # and there was no chance to add any logic driving it. - new_domains.append(value) - else: - new_fragment = Fragment.get(value, platform=platform) - if domain_name not in new_fragment.domains: - defined = new_fragment.domains.keys() - raise DomainError( - "Fragment returned by missing domain callback does not define " - "requested domain '{}' (defines {})." 
- .format(domain_name, ", ".join("'{}'".format(n) for n in defined))) - self.add_subfragment(new_fragment, "cd_{}".format(domain_name)) - self.add_domains(new_fragment.domains.values()) - return new_domains - - def _propagate_domains(self, missing_domain, *, platform=None): - self._propagate_domains_up() - self._propagate_domains_down() - self._resolve_hierarchy_conflicts() - new_domains = self._create_missing_domains(missing_domain, platform=platform) - self._propagate_domains_down() - return new_domains - - def _prepare_use_def_graph(self, parent, level, uses, defs, ios, top): - def add_uses(*sigs, self=self): - for sig in flatten(sigs): - if sig not in uses: - uses[sig] = set() - uses[sig].add(self) - - def add_defs(*sigs): - for sig in flatten(sigs): - if sig not in defs: - defs[sig] = self - else: - assert defs[sig] is self - - def add_io(*sigs): - for sig in flatten(sigs): - if sig not in ios: - ios[sig] = self - else: - assert ios[sig] is self - - # Collect all signals we're driving (on LHS of statements), and signals we're using - # (on RHS of statements, or in clock domains). - for stmt in self.statements: - add_uses(stmt._rhs_signals()) - add_defs(stmt._lhs_signals()) - - for domain, _ in self.iter_sync(): - cd = self.domains[domain] - add_uses(cd.clk) - if cd.rst is not None: - add_uses(cd.rst) - - # Repeat for subfragments. - for subfrag, name in self.subfragments: - if isinstance(subfrag, Instance): - for port_name, (value, dir) in subfrag.named_ports.items(): - if dir == "i": - # Prioritize defs over uses. 
- rhs_without_outputs = value._rhs_signals() - subfrag.iter_ports(dir="o") - subfrag.add_ports(rhs_without_outputs, dir=dir) - add_uses(value._rhs_signals()) - if dir == "o": - subfrag.add_ports(value._lhs_signals(), dir=dir) - add_defs(value._lhs_signals()) - if dir == "io": - subfrag.add_ports(value._lhs_signals(), dir=dir) - add_io(value._lhs_signals()) - else: - parent[subfrag] = self - level [subfrag] = level[self] + 1 - - subfrag._prepare_use_def_graph(parent, level, uses, defs, ios, top) - - def _propagate_ports(self, ports, all_undef_as_ports): - # Take this fragment graph: - # - # __ B (def: q, use: p r) - # / - # A (def: p, use: q r) - # \ - # \_ C (def: r, use: p q) - # - # We need to consider three cases. - # 1. Signal p requires an input port in B; - # 2. Signal r requires an output port in C; - # 3. Signal r requires an output port in C and an input port in B. - # - # Adding these ports can be in general done in three steps for each signal: - # 1. Find the least common ancestor of all uses and defs. - # 2. Going upwards from the single def, add output ports. - # 3. Going upwards from all uses, add input ports. - - parent = {self: None} - level = {self: 0} - uses = SignalDict() - defs = SignalDict() - ios = SignalDict() - self._prepare_use_def_graph(parent, level, uses, defs, ios, self) - - ports = SignalSet(ports) - if all_undef_as_ports: - for sig in uses: - if sig in defs: - continue - ports.add(sig) - for sig in ports: - if sig not in uses: - uses[sig] = set() - uses[sig].add(self) - - @memoize - def lca_of(fragu, fragv): - # Normalize fragu to be deeper than fragv. - if level[fragu] < level[fragv]: - fragu, fragv = fragv, fragu - # Find ancestor of fragu on the same level as fragv. - for _ in range(level[fragu] - level[fragv]): - fragu = parent[fragu] - # If fragv was the ancestor of fragv, we're done. - if fragu == fragv: - return fragu - # Otherwise, they are at the same level but in different branches. 
Step both fragu - # and fragv until we find the common ancestor. - while parent[fragu] != parent[fragv]: - fragu = parent[fragu] - fragv = parent[fragv] - return parent[fragu] - - for sig in uses: - if sig in defs: - lca = reduce(lca_of, uses[sig], defs[sig]) - else: - lca = reduce(lca_of, uses[sig]) - - for frag in uses[sig]: - if sig in defs and frag is defs[sig]: - continue - while frag != lca: - frag.add_ports(sig, dir="i") - frag = parent[frag] - - if sig in defs: - frag = defs[sig] - while frag != lca: - frag.add_ports(sig, dir="o") - frag = parent[frag] - - for sig in ios: - frag = ios[sig] - while frag is not None: - frag.add_ports(sig, dir="io") - frag = parent[frag] - - for sig in ports: - if sig in ios: - continue - if sig in defs: - self.add_ports(sig, dir="o") - else: - self.add_ports(sig, dir="i") - - def prepare(self, ports=None, missing_domain=lambda name: ClockDomain(name)): - from .xfrm import SampleLowerer, DomainLowerer - - fragment = SampleLowerer()(self) - new_domains = fragment._propagate_domains(missing_domain) - fragment = DomainLowerer()(fragment) - if ports is None: - fragment._propagate_ports(ports=(), all_undef_as_ports=True) - else: - if not isinstance(ports, tuple) and not isinstance(ports, list): - msg = "`ports` must be either a list or a tuple, not {!r}"\ - .format(ports) - if isinstance(ports, Value): - msg += " (did you mean `ports=(,)`, rather than `ports=`?)" - raise TypeError(msg) - mapped_ports = [] - # Lower late bound signals like ClockSignal() to ports. - port_lowerer = DomainLowerer(fragment.domains) - for port in ports: - if not isinstance(port, (Signal, ClockSignal, ResetSignal)): - raise TypeError("Only signals may be added as ports, not {!r}" - .format(port)) - mapped_ports.append(port_lowerer.on_value(port)) - # Add ports for all newly created missing clock domains, since not doing so defeats - # the purpose of domain auto-creation. 
(It's possible to refer to these ports before - # the domain actually exists through late binding, but it's inconvenient.) - for cd in new_domains: - mapped_ports.append(cd.clk) - if cd.rst is not None: - mapped_ports.append(cd.rst) - fragment._propagate_ports(ports=mapped_ports, all_undef_as_ports=False) - return fragment - - -class Instance(Fragment): - def __init__(self, type, *args, **kwargs): - super().__init__() - - self.type = type - self.parameters = OrderedDict() - self.named_ports = OrderedDict() - - for (kind, name, value) in args: - if kind == "a": - self.attrs[name] = value - elif kind == "p": - self.parameters[name] = value - elif kind in ("i", "o", "io"): - self.named_ports[name] = (Value.cast(value), kind) - else: - raise NameError("Instance argument {!r} should be a tuple (kind, name, value) " - "where kind is one of \"a\", \"p\", \"i\", \"o\", or \"io\"" - .format((kind, name, value))) - - for kw, arg in kwargs.items(): - if kw.startswith("a_"): - self.attrs[kw[2:]] = arg - elif kw.startswith("p_"): - self.parameters[kw[2:]] = arg - elif kw.startswith("i_"): - self.named_ports[kw[2:]] = (Value.cast(arg), "i") - elif kw.startswith("o_"): - self.named_ports[kw[2:]] = (Value.cast(arg), "o") - elif kw.startswith("io_"): - self.named_ports[kw[3:]] = (Value.cast(arg), "io") - else: - raise NameError("Instance keyword argument {}={!r} does not start with one of " - "\"a_\", \"p_\", \"i_\", \"o_\", or \"io_\"" - .format(kw, arg)) +warnings.warn("instead of nmigen.hdl.ir, use amaranth.hdl.ir", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/hdl/mem.py b/nmigen/hdl/mem.py index fd7c874..d3df2ae 100644 --- a/nmigen/hdl/mem.py +++ b/nmigen/hdl/mem.py @@ -1,322 +1,7 @@ -import operator -from collections import OrderedDict - -from .. 
import tracer -from .ast import * -from .ir import Elaboratable, Instance +from amaranth.hdl.mem import * +from amaranth.hdl.mem import __all__ -__all__ = ["Memory", "ReadPort", "WritePort", "DummyPort"] - - -class Memory: - """A word addressable storage. - - Parameters - ---------- - width : int - Access granularity. Each storage element of this memory is ``width`` bits in size. - depth : int - Word count. This memory contains ``depth`` storage elements. - init : list of int - Initial values. At power on, each storage element in this memory is initialized to - the corresponding element of ``init``, if any, or to zero otherwise. - Uninitialized memories are not currently supported. - name : str - Name hint for this memory. If ``None`` (default) the name is inferred from the variable - name this ``Signal`` is assigned to. - attrs : dict - Dictionary of synthesis attributes. - - Attributes - ---------- - width : int - depth : int - init : list of int - attrs : dict - """ - def __init__(self, *, width, depth, init=None, name=None, attrs=None, simulate=True): - if not isinstance(width, int) or width < 0: - raise TypeError("Memory width must be a non-negative integer, not {!r}" - .format(width)) - if not isinstance(depth, int) or depth < 0: - raise TypeError("Memory depth must be a non-negative integer, not {!r}" - .format(depth)) - - self.name = name or tracer.get_var_name(depth=2, default="$memory") - self.src_loc = tracer.get_src_loc() - - self.width = width - self.depth = depth - self.attrs = OrderedDict(() if attrs is None else attrs) - - # Array of signals for simulation. 
- self._array = Array() - if simulate: - for addr in range(self.depth): - self._array.append(Signal(self.width, name="{}({})" - .format(name or "memory", addr))) - - self.init = init - - @property - def init(self): - return self._init - - @init.setter - def init(self, new_init): - self._init = [] if new_init is None else list(new_init) - if len(self.init) > self.depth: - raise ValueError("Memory initialization value count exceed memory depth ({} > {})" - .format(len(self.init), self.depth)) - - try: - for addr in range(len(self._array)): - if addr < len(self._init): - self._array[addr].reset = operator.index(self._init[addr]) - else: - self._array[addr].reset = 0 - except TypeError as e: - raise TypeError("Memory initialization value at address {:x}: {}" - .format(addr, e)) from None - - def read_port(self, *, src_loc_at=0, **kwargs): - """Get a read port. - - See :class:`ReadPort` for details. - - Arguments - --------- - domain : str - transparent : bool - - Returns - ------- - An instance of :class:`ReadPort` associated with this memory. - """ - return ReadPort(self, src_loc_at=1 + src_loc_at, **kwargs) - - def write_port(self, *, src_loc_at=0, **kwargs): - """Get a write port. - - See :class:`WritePort` for details. - - Arguments - --------- - domain : str - granularity : int - - Returns - ------- - An instance of :class:`WritePort` associated with this memory. - """ - return WritePort(self, src_loc_at=1 + src_loc_at, **kwargs) - - def __getitem__(self, index): - """Simulation only.""" - return self._array[index] - - -class ReadPort(Elaboratable): - """A memory read port. - - Parameters - ---------- - memory : :class:`Memory` - Memory associated with the port. - domain : str - Clock domain. Defaults to ``"sync"``. If set to ``"comb"``, the port is asynchronous. - Otherwise, the read data becomes available on the next clock cycle. - transparent : bool - Port transparency. 
If set (default), a read at an address that is also being written to in - the same clock cycle will output the new value. Otherwise, the old value will be output - first. This behavior only applies to ports in the same domain. - - Attributes - ---------- - memory : :class:`Memory` - domain : str - transparent : bool - addr : Signal(range(memory.depth)), in - Read address. - data : Signal(memory.width), out - Read data. - en : Signal or Const, in - Read enable. If asserted, ``data`` is updated with the word stored at ``addr``. Note that - transparent ports cannot assign ``en`` (which is hardwired to 1 instead), as doing so is - currently not supported by Yosys. - - Exceptions - ---------- - Raises :exn:`ValueError` if the read port is simultaneously asynchronous and non-transparent. - """ - def __init__(self, memory, *, domain="sync", transparent=True, src_loc_at=0): - if domain == "comb" and not transparent: - raise ValueError("Read port cannot be simultaneously asynchronous and non-transparent") - - self.memory = memory - self.domain = domain - self.transparent = transparent - - self.addr = Signal(range(memory.depth), - name="{}_r_addr".format(memory.name), src_loc_at=1 + src_loc_at) - self.data = Signal(memory.width, - name="{}_r_data".format(memory.name), src_loc_at=1 + src_loc_at) - if self.domain != "comb" and not transparent: - self.en = Signal(name="{}_r_en".format(memory.name), reset=1, - src_loc_at=1 + src_loc_at) - else: - self.en = Const(1) - - def elaborate(self, platform): - f = Instance("$memrd", - p_MEMID=self.memory, - p_ABITS=self.addr.width, - p_WIDTH=self.data.width, - p_CLK_ENABLE=self.domain != "comb", - p_CLK_POLARITY=1, - p_TRANSPARENT=self.transparent, - i_CLK=ClockSignal(self.domain) if self.domain != "comb" else Const(0), - i_EN=self.en, - i_ADDR=self.addr, - o_DATA=self.data, - ) - if self.domain == "comb": - # Asynchronous port - f.add_statements(self.data.eq(self.memory._array[self.addr])) - f.add_driver(self.data) - elif not 
self.transparent: - # Synchronous, read-before-write port - f.add_statements( - Switch(self.en, { - 1: self.data.eq(self.memory._array[self.addr]) - }) - ) - f.add_driver(self.data, self.domain) - else: - # Synchronous, write-through port - # This model is a bit unconventional. We model transparent ports as asynchronous ports - # that are latched when the clock is high. This isn't exactly correct, but it is very - # close to the correct behavior of a transparent port, and the difference should only - # be observable in pathological cases of clock gating. A register is injected to - # the address input to achieve the correct address-to-data latency. Also, the reset - # value of the data output is forcibly set to the 0th initial value, if any--note that - # many FPGAs do not guarantee this behavior! - if len(self.memory.init) > 0: - self.data.reset = operator.index(self.memory.init[0]) - latch_addr = Signal.like(self.addr) - f.add_statements( - latch_addr.eq(self.addr), - Switch(ClockSignal(self.domain), { - 0: self.data.eq(self.data), - 1: self.data.eq(self.memory._array[latch_addr]), - }), - ) - f.add_driver(latch_addr, self.domain) - f.add_driver(self.data) - return f - - -class WritePort(Elaboratable): - """A memory write port. - - Parameters - ---------- - memory : :class:`Memory` - Memory associated with the port. - domain : str - Clock domain. Defaults to ``"sync"``. Writes have a latency of 1 clock cycle. - granularity : int - Port granularity. Defaults to ``memory.width``. Write data is split evenly in - ``memory.width // granularity`` chunks, which can be updated independently. - - Attributes - ---------- - memory : :class:`Memory` - domain : str - granularity : int - addr : Signal(range(memory.depth)), in - Write address. - data : Signal(memory.width), in - Write data. - en : Signal(memory.width // granularity), in - Write enable. 
Each bit selects a non-overlapping chunk of ``granularity`` bits on the - ``data`` signal, which is written to memory at ``addr``. Unselected chunks are ignored. - - Exceptions - ---------- - Raises :exn:`ValueError` if the write port granularity is greater than memory width, or does not - divide memory width evenly. - """ - def __init__(self, memory, *, domain="sync", granularity=None, src_loc_at=0): - if granularity is None: - granularity = memory.width - if not isinstance(granularity, int) or granularity < 0: - raise TypeError("Write port granularity must be a non-negative integer, not {!r}" - .format(granularity)) - if granularity > memory.width: - raise ValueError("Write port granularity must not be greater than memory width " - "({} > {})" - .format(granularity, memory.width)) - if memory.width // granularity * granularity != memory.width: - raise ValueError("Write port granularity must divide memory width evenly") - - self.memory = memory - self.domain = domain - self.granularity = granularity - - self.addr = Signal(range(memory.depth), - name="{}_w_addr".format(memory.name), src_loc_at=1 + src_loc_at) - self.data = Signal(memory.width, - name="{}_w_data".format(memory.name), src_loc_at=1 + src_loc_at) - self.en = Signal(memory.width // granularity, - name="{}_w_en".format(memory.name), src_loc_at=1 + src_loc_at) - - def elaborate(self, platform): - f = Instance("$memwr", - p_MEMID=self.memory, - p_ABITS=self.addr.width, - p_WIDTH=self.data.width, - p_CLK_ENABLE=1, - p_CLK_POLARITY=1, - p_PRIORITY=0, - i_CLK=ClockSignal(self.domain), - i_EN=Cat(Repl(en_bit, self.granularity) for en_bit in self.en), - i_ADDR=self.addr, - i_DATA=self.data, - ) - if len(self.en) > 1: - for index, en_bit in enumerate(self.en): - offset = index * self.granularity - bits = slice(offset, offset + self.granularity) - write_data = self.memory._array[self.addr][bits].eq(self.data[bits]) - f.add_statements(Switch(en_bit, { 1: write_data })) - else: - write_data = 
self.memory._array[self.addr].eq(self.data) - f.add_statements(Switch(self.en, { 1: write_data })) - for signal in self.memory._array: - f.add_driver(signal, self.domain) - return f - - -class DummyPort: - """Dummy memory port. - - This port can be used in place of either a read or a write port for testing and verification. - It does not include any read/write port specific attributes, i.e. none besides ``"domain"``; - any such attributes may be set manually. - """ - def __init__(self, *, data_width, addr_width, domain="sync", name=None, granularity=None): - self.domain = domain - - if granularity is None: - granularity = data_width - if name is None: - name = tracer.get_var_name(depth=2, default="dummy") - - self.addr = Signal(addr_width, - name="{}_addr".format(name), src_loc_at=1) - self.data = Signal(data_width, - name="{}_data".format(name), src_loc_at=1) - self.en = Signal(data_width // granularity, - name="{}_en".format(name), src_loc_at=1) +import warnings +warnings.warn("instead of nmigen.hdl.mem, use amaranth.hdl.mem", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/hdl/rec.py b/nmigen/hdl/rec.py index 125bb4c..e8b822d 100644 --- a/nmigen/hdl/rec.py +++ b/nmigen/hdl/rec.py @@ -1,278 +1,7 @@ -from enum import Enum -from collections import OrderedDict -from functools import reduce, wraps - -from .. 
import tracer -from .._utils import union -from .ast import * +from amaranth.hdl.rec import * +from amaranth.hdl.rec import __all__ -__all__ = ["Direction", "DIR_NONE", "DIR_FANOUT", "DIR_FANIN", "Layout", "Record"] - - -Direction = Enum('Direction', ('NONE', 'FANOUT', 'FANIN')) - -DIR_NONE = Direction.NONE -DIR_FANOUT = Direction.FANOUT -DIR_FANIN = Direction.FANIN - - -class Layout: - @staticmethod - def cast(obj, *, src_loc_at=0): - if isinstance(obj, Layout): - return obj - return Layout(obj, src_loc_at=1 + src_loc_at) - - def __init__(self, fields, *, src_loc_at=0): - self.fields = OrderedDict() - for field in fields: - if not isinstance(field, tuple) or len(field) not in (2, 3): - raise TypeError("Field {!r} has invalid layout: should be either " - "(name, shape) or (name, shape, direction)" - .format(field)) - if len(field) == 2: - name, shape = field - direction = DIR_NONE - if isinstance(shape, list): - shape = Layout.cast(shape) - else: - name, shape, direction = field - if not isinstance(direction, Direction): - raise TypeError("Field {!r} has invalid direction: should be a Direction " - "instance like DIR_FANIN" - .format(field)) - if not isinstance(name, str): - raise TypeError("Field {!r} has invalid name: should be a string" - .format(field)) - if not isinstance(shape, Layout): - try: - # Check provided shape by calling Shape.cast and checking for exception - Shape.cast(shape, src_loc_at=1 + src_loc_at) - except Exception: - raise TypeError("Field {!r} has invalid shape: should be castable to Shape " - "or a list of fields of a nested record" - .format(field)) - if name in self.fields: - raise NameError("Field {!r} has a name that is already present in the layout" - .format(field)) - self.fields[name] = (shape, direction) - - def __getitem__(self, item): - if isinstance(item, tuple): - return Layout([ - (name, shape, dir) - for (name, (shape, dir)) in self.fields.items() - if name in item - ]) - - return self.fields[item] - - def __iter__(self): - 
for name, (shape, dir) in self.fields.items(): - yield (name, shape, dir) - - def __eq__(self, other): - return self.fields == other.fields - - def __repr__(self): - field_reprs = [] - for name, shape, dir in self: - if dir == DIR_NONE: - field_reprs.append("({!r}, {!r})".format(name, shape)) - else: - field_reprs.append("({!r}, {!r}, Direction.{})".format(name, shape, dir.name)) - return "Layout([{}])".format(", ".join(field_reprs)) - - -class Record(ValueCastable): - @staticmethod - def like(other, *, name=None, name_suffix=None, src_loc_at=0): - if name is not None: - new_name = str(name) - elif name_suffix is not None: - new_name = other.name + str(name_suffix) - else: - new_name = tracer.get_var_name(depth=2 + src_loc_at, default=None) - - def concat(a, b): - if a is None: - return b - return "{}__{}".format(a, b) - - fields = {} - for field_name in other.fields: - field = other[field_name] - if isinstance(field, Record): - fields[field_name] = Record.like(field, name=concat(new_name, field_name), - src_loc_at=1 + src_loc_at) - else: - fields[field_name] = Signal.like(field, name=concat(new_name, field_name), - src_loc_at=1 + src_loc_at) - - return Record(other.layout, name=new_name, fields=fields, src_loc_at=1) - - def __init__(self, layout, *, name=None, fields=None, src_loc_at=0): - if name is None: - name = tracer.get_var_name(depth=2 + src_loc_at, default=None) - - self.name = name - self.src_loc = tracer.get_src_loc(src_loc_at) - - def concat(a, b): - if a is None: - return b - return "{}__{}".format(a, b) - - self.layout = Layout.cast(layout, src_loc_at=1 + src_loc_at) - self.fields = OrderedDict() - for field_name, field_shape, field_dir in self.layout: - if fields is not None and field_name in fields: - field = fields[field_name] - if isinstance(field_shape, Layout): - assert isinstance(field, Record) and field_shape == field.layout - else: - assert isinstance(field, Signal) and Shape.cast(field_shape) == field.shape() - self.fields[field_name] = 
field - else: - if isinstance(field_shape, Layout): - self.fields[field_name] = Record(field_shape, name=concat(name, field_name), - src_loc_at=1 + src_loc_at) - else: - self.fields[field_name] = Signal(field_shape, name=concat(name, field_name), - src_loc_at=1 + src_loc_at) - - def __getattr__(self, name): - return self[name] - - def __getitem__(self, item): - if isinstance(item, str): - try: - return self.fields[item] - except KeyError: - if self.name is None: - reference = "Unnamed record" - else: - reference = "Record '{}'".format(self.name) - raise AttributeError("{} does not have a field '{}'. Did you mean one of: {}?" - .format(reference, item, ", ".join(self.fields))) from None - elif isinstance(item, tuple): - return Record(self.layout[item], fields={ - field_name: field_value - for field_name, field_value in self.fields.items() - if field_name in item - }) - else: - try: - return Value.__getitem__(self, item) - except KeyError: - if self.name is None: - reference = "Unnamed record" - else: - reference = "Record '{}'".format(self.name) - raise AttributeError("{} does not have a field '{}'. Did you mean one of: {}?" 
- .format(reference, item, ", ".join(self.fields))) from None - - @ValueCastable.lowermethod - def as_value(self): - return Cat(self.fields.values()) - - def __len__(self): - return len(self.as_value()) - - def _lhs_signals(self): - return union((f._lhs_signals() for f in self.fields.values()), start=SignalSet()) - - def _rhs_signals(self): - return union((f._rhs_signals() for f in self.fields.values()), start=SignalSet()) - - def __repr__(self): - fields = [] - for field_name, field in self.fields.items(): - if isinstance(field, Signal): - fields.append(field_name) - else: - fields.append(repr(field)) - name = self.name - if name is None: - name = "" - return "(rec {} {})".format(name, " ".join(fields)) - - def shape(self): - return self.as_value().shape() - - def connect(self, *subordinates, include=None, exclude=None): - def rec_name(record): - if record.name is None: - return "unnamed record" - else: - return "record '{}'".format(record.name) - - for field in include or {}: - if field not in self.fields: - raise AttributeError("Cannot include field '{}' because it is not present in {}" - .format(field, rec_name(self))) - for field in exclude or {}: - if field not in self.fields: - raise AttributeError("Cannot exclude field '{}' because it is not present in {}" - .format(field, rec_name(self))) - - stmts = [] - for field in self.fields: - if include is not None and field not in include: - continue - if exclude is not None and field in exclude: - continue - - shape, direction = self.layout[field] - if not isinstance(shape, Layout) and direction == DIR_NONE: - raise TypeError("Cannot connect field '{}' of {} because it does not have " - "a direction" - .format(field, rec_name(self))) - - item = self.fields[field] - subord_items = [] - for subord in subordinates: - if field not in subord.fields: - raise AttributeError("Cannot connect field '{}' of {} to subordinate {} " - "because the subordinate record does not have this field" - .format(field, rec_name(self), 
rec_name(subord))) - subord_items.append(subord.fields[field]) - - if isinstance(shape, Layout): - sub_include = include[field] if include and field in include else None - sub_exclude = exclude[field] if exclude and field in exclude else None - stmts += item.connect(*subord_items, include=sub_include, exclude=sub_exclude) - else: - if direction == DIR_FANOUT: - stmts += [sub_item.eq(item) for sub_item in subord_items] - if direction == DIR_FANIN: - stmts += [item.eq(reduce(lambda a, b: a | b, subord_items))] - - return stmts - -def _valueproxy(name): - value_func = getattr(Value, name) - @wraps(value_func) - def _wrapper(self, *args, **kwargs): - return value_func(Value.cast(self), *args, **kwargs) - return _wrapper - -for name in [ - "__bool__", - "__invert__", "__neg__", - "__add__", "__radd__", "__sub__", "__rsub__", - "__mul__", "__rmul__", - "__mod__", "__rmod__", "__floordiv__", "__rfloordiv__", - "__lshift__", "__rlshift__", "__rshift__", "__rrshift__", - "__and__", "__rand__", "__xor__", "__rxor__", "__or__", "__ror__", - "__eq__", "__ne__", "__lt__", "__le__", "__gt__", "__ge__", - "__abs__", "__len__", - "as_unsigned", "as_signed", "bool", "any", "all", "xor", "implies", - "bit_select", "word_select", "matches", - "shift_left", "shift_right", "rotate_left", "rotate_right", "eq" - ]: - setattr(Record, name, _valueproxy(name)) - -del _valueproxy -del name +import warnings +warnings.warn("instead of nmigen.hdl.rec, use amaranth.hdl.rec", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/hdl/xfrm.py b/nmigen/hdl/xfrm.py index 97f8c53..f182ee2 100644 --- a/nmigen/hdl/xfrm.py +++ b/nmigen/hdl/xfrm.py @@ -1,743 +1,7 @@ -from abc import ABCMeta, abstractmethod -from collections import OrderedDict -from collections.abc import Iterable +from amaranth.hdl.xfrm import * +from amaranth.hdl.xfrm import __all__ -from .._utils import flatten -from .. 
import tracer -from .ast import * -from .ast import _StatementList -from .cd import * -from .ir import * -from .rec import * - -__all__ = ["ValueVisitor", "ValueTransformer", - "StatementVisitor", "StatementTransformer", - "FragmentTransformer", - "TransformedElaboratable", - "DomainCollector", "DomainRenamer", "DomainLowerer", - "SampleDomainInjector", "SampleLowerer", - "SwitchCleaner", "LHSGroupAnalyzer", "LHSGroupFilter", - "ResetInserter", "EnableInserter"] - - -class ValueVisitor(metaclass=ABCMeta): - @abstractmethod - def on_Const(self, value): - pass # :nocov: - - @abstractmethod - def on_AnyConst(self, value): - pass # :nocov: - - @abstractmethod - def on_AnySeq(self, value): - pass # :nocov: - - @abstractmethod - def on_Signal(self, value): - pass # :nocov: - - @abstractmethod - def on_ClockSignal(self, value): - pass # :nocov: - - @abstractmethod - def on_ResetSignal(self, value): - pass # :nocov: - - @abstractmethod - def on_Operator(self, value): - pass # :nocov: - - @abstractmethod - def on_Slice(self, value): - pass # :nocov: - - @abstractmethod - def on_Part(self, value): - pass # :nocov: - - @abstractmethod - def on_Cat(self, value): - pass # :nocov: - - @abstractmethod - def on_Repl(self, value): - pass # :nocov: - - @abstractmethod - def on_ArrayProxy(self, value): - pass # :nocov: - - @abstractmethod - def on_Sample(self, value): - pass # :nocov: - - @abstractmethod - def on_Initial(self, value): - pass # :nocov: - - def on_unknown_value(self, value): - raise TypeError("Cannot transform value {!r}".format(value)) # :nocov: - - def replace_value_src_loc(self, value, new_value): - return True - - def on_value(self, value): - if type(value) is Const: - new_value = self.on_Const(value) - elif type(value) is AnyConst: - new_value = self.on_AnyConst(value) - elif type(value) is AnySeq: - new_value = self.on_AnySeq(value) - elif isinstance(value, Signal): - # Uses `isinstance()` and not `type() is` because nmigen.compat requires it. 
- new_value = self.on_Signal(value) - elif type(value) is ClockSignal: - new_value = self.on_ClockSignal(value) - elif type(value) is ResetSignal: - new_value = self.on_ResetSignal(value) - elif type(value) is Operator: - new_value = self.on_Operator(value) - elif type(value) is Slice: - new_value = self.on_Slice(value) - elif type(value) is Part: - new_value = self.on_Part(value) - elif type(value) is Cat: - new_value = self.on_Cat(value) - elif type(value) is Repl: - new_value = self.on_Repl(value) - elif type(value) is ArrayProxy: - new_value = self.on_ArrayProxy(value) - elif type(value) is Sample: - new_value = self.on_Sample(value) - elif type(value) is Initial: - new_value = self.on_Initial(value) - elif isinstance(value, UserValue): - # Uses `isinstance()` and not `type() is` to allow inheriting. - new_value = self.on_value(value._lazy_lower()) - else: - new_value = self.on_unknown_value(value) - if isinstance(new_value, Value) and self.replace_value_src_loc(value, new_value): - new_value.src_loc = value.src_loc - return new_value - - def __call__(self, value): - return self.on_value(value) - - -class ValueTransformer(ValueVisitor): - def on_Const(self, value): - return value - - def on_AnyConst(self, value): - return value - - def on_AnySeq(self, value): - return value - - def on_Signal(self, value): - return value - - def on_ClockSignal(self, value): - return value - - def on_ResetSignal(self, value): - return value - - def on_Operator(self, value): - return Operator(value.operator, [self.on_value(o) for o in value.operands]) - - def on_Slice(self, value): - return Slice(self.on_value(value.value), value.start, value.stop) - - def on_Part(self, value): - return Part(self.on_value(value.value), self.on_value(value.offset), - value.width, value.stride) - - def on_Cat(self, value): - return Cat(self.on_value(o) for o in value.parts) - - def on_Repl(self, value): - return Repl(self.on_value(value.value), value.count) - - def on_ArrayProxy(self, value): - 
return ArrayProxy([self.on_value(elem) for elem in value._iter_as_values()], - self.on_value(value.index)) - - def on_Sample(self, value): - return Sample(self.on_value(value.value), value.clocks, value.domain) - - def on_Initial(self, value): - return value - - -class StatementVisitor(metaclass=ABCMeta): - @abstractmethod - def on_Assign(self, stmt): - pass # :nocov: - - @abstractmethod - def on_Assert(self, stmt): - pass # :nocov: - - @abstractmethod - def on_Assume(self, stmt): - pass # :nocov: - - @abstractmethod - def on_Cover(self, stmt): - pass # :nocov: - - @abstractmethod - def on_Switch(self, stmt): - pass # :nocov: - - @abstractmethod - def on_statements(self, stmts): - pass # :nocov: - - def on_unknown_statement(self, stmt): - raise TypeError("Cannot transform statement {!r}".format(stmt)) # :nocov: - - def replace_statement_src_loc(self, stmt, new_stmt): - return True - - def on_statement(self, stmt): - if type(stmt) is Assign: - new_stmt = self.on_Assign(stmt) - elif type(stmt) is Assert: - new_stmt = self.on_Assert(stmt) - elif type(stmt) is Assume: - new_stmt = self.on_Assume(stmt) - elif type(stmt) is Cover: - new_stmt = self.on_Cover(stmt) - elif isinstance(stmt, Switch): - # Uses `isinstance()` and not `type() is` because nmigen.compat requires it. 
- new_stmt = self.on_Switch(stmt) - elif isinstance(stmt, Iterable): - new_stmt = self.on_statements(stmt) - else: - new_stmt = self.on_unknown_statement(stmt) - if isinstance(new_stmt, Statement) and self.replace_statement_src_loc(stmt, new_stmt): - new_stmt.src_loc = stmt.src_loc - if isinstance(new_stmt, Switch) and isinstance(stmt, Switch): - new_stmt.case_src_locs = stmt.case_src_locs - if isinstance(new_stmt, Property): - new_stmt._MustUse__used = True - return new_stmt - - def __call__(self, stmt): - return self.on_statement(stmt) - - -class StatementTransformer(StatementVisitor): - def on_value(self, value): - return value - - def on_Assign(self, stmt): - return Assign(self.on_value(stmt.lhs), self.on_value(stmt.rhs)) - - def on_Assert(self, stmt): - return Assert(self.on_value(stmt.test), _check=stmt._check, _en=stmt._en) - - def on_Assume(self, stmt): - return Assume(self.on_value(stmt.test), _check=stmt._check, _en=stmt._en) - - def on_Cover(self, stmt): - return Cover(self.on_value(stmt.test), _check=stmt._check, _en=stmt._en) - - def on_Switch(self, stmt): - cases = OrderedDict((k, self.on_statement(s)) for k, s in stmt.cases.items()) - return Switch(self.on_value(stmt.test), cases) - - def on_statements(self, stmts): - return _StatementList(flatten(self.on_statement(stmt) for stmt in stmts)) - - -class FragmentTransformer: - def map_subfragments(self, fragment, new_fragment): - for subfragment, name in fragment.subfragments: - new_fragment.add_subfragment(self(subfragment), name) - - def map_ports(self, fragment, new_fragment): - for port, dir in fragment.ports.items(): - new_fragment.add_ports(port, dir=dir) - - def map_named_ports(self, fragment, new_fragment): - if hasattr(self, "on_value"): - for name, (value, dir) in fragment.named_ports.items(): - new_fragment.named_ports[name] = self.on_value(value), dir - else: - new_fragment.named_ports = OrderedDict(fragment.named_ports.items()) - - def map_domains(self, fragment, new_fragment): - for domain 
in fragment.iter_domains(): - new_fragment.add_domains(fragment.domains[domain]) - - def map_statements(self, fragment, new_fragment): - if hasattr(self, "on_statement"): - new_fragment.add_statements(map(self.on_statement, fragment.statements)) - else: - new_fragment.add_statements(fragment.statements) - - def map_drivers(self, fragment, new_fragment): - for domain, signal in fragment.iter_drivers(): - new_fragment.add_driver(signal, domain) - - def on_fragment(self, fragment): - if isinstance(fragment, Instance): - new_fragment = Instance(fragment.type) - new_fragment.parameters = OrderedDict(fragment.parameters) - self.map_named_ports(fragment, new_fragment) - else: - new_fragment = Fragment() - new_fragment.flatten = fragment.flatten - new_fragment.attrs = OrderedDict(fragment.attrs) - self.map_ports(fragment, new_fragment) - self.map_subfragments(fragment, new_fragment) - self.map_domains(fragment, new_fragment) - self.map_statements(fragment, new_fragment) - self.map_drivers(fragment, new_fragment) - return new_fragment - - def __call__(self, value, *, src_loc_at=0): - if isinstance(value, Fragment): - return self.on_fragment(value) - elif isinstance(value, TransformedElaboratable): - value._transforms_.append(self) - return value - elif hasattr(value, "elaborate"): - value = TransformedElaboratable(value, src_loc_at=1 + src_loc_at) - value._transforms_.append(self) - return value - else: - raise AttributeError("Object {!r} cannot be elaborated".format(value)) - - -class TransformedElaboratable(Elaboratable): - def __init__(self, elaboratable, *, src_loc_at=0): - assert hasattr(elaboratable, "elaborate") - - # Fields prefixed and suffixed with underscore to avoid as many conflicts with the inner - # object as possible, since we're forwarding attribute requests to it. 
- self._elaboratable_ = elaboratable - self._transforms_ = [] - - def __getattr__(self, attr): - return getattr(self._elaboratable_, attr) - - def elaborate(self, platform): - fragment = Fragment.get(self._elaboratable_, platform) - for transform in self._transforms_: - fragment = transform(fragment) - return fragment - - -class DomainCollector(ValueVisitor, StatementVisitor): - def __init__(self): - self.used_domains = set() - self.defined_domains = set() - self._local_domains = set() - - def _add_used_domain(self, domain_name): - if domain_name is None: - return - if domain_name in self._local_domains: - return - self.used_domains.add(domain_name) - - def on_ignore(self, value): - pass - - on_Const = on_ignore - on_AnyConst = on_ignore - on_AnySeq = on_ignore - on_Signal = on_ignore - - def on_ClockSignal(self, value): - self._add_used_domain(value.domain) - - def on_ResetSignal(self, value): - self._add_used_domain(value.domain) - - def on_Operator(self, value): - for o in value.operands: - self.on_value(o) - - def on_Slice(self, value): - self.on_value(value.value) - - def on_Part(self, value): - self.on_value(value.value) - self.on_value(value.offset) - - def on_Cat(self, value): - for o in value.parts: - self.on_value(o) - - def on_Repl(self, value): - self.on_value(value.value) - - def on_ArrayProxy(self, value): - for elem in value._iter_as_values(): - self.on_value(elem) - self.on_value(value.index) - - def on_Sample(self, value): - self.on_value(value.value) - - def on_Initial(self, value): - pass - - def on_Assign(self, stmt): - self.on_value(stmt.lhs) - self.on_value(stmt.rhs) - - def on_property(self, stmt): - self.on_value(stmt.test) - - on_Assert = on_property - on_Assume = on_property - on_Cover = on_property - - def on_Switch(self, stmt): - self.on_value(stmt.test) - for stmts in stmt.cases.values(): - self.on_statement(stmts) - - def on_statements(self, stmts): - for stmt in stmts: - self.on_statement(stmt) - - def on_fragment(self, fragment): - 
if isinstance(fragment, Instance): - for name, (value, dir) in fragment.named_ports.items(): - self.on_value(value) - - old_local_domains, self._local_domains = self._local_domains, set(self._local_domains) - for domain_name, domain in fragment.domains.items(): - if domain.local: - self._local_domains.add(domain_name) - else: - self.defined_domains.add(domain_name) - - self.on_statements(fragment.statements) - for domain_name in fragment.drivers: - self._add_used_domain(domain_name) - for subfragment, name in fragment.subfragments: - self.on_fragment(subfragment) - - self._local_domains = old_local_domains - - def __call__(self, fragment): - self.on_fragment(fragment) - - -class DomainRenamer(FragmentTransformer, ValueTransformer, StatementTransformer): - def __init__(self, domain_map): - if isinstance(domain_map, str): - domain_map = {"sync": domain_map} - for src, dst in domain_map.items(): - if src == "comb": - raise ValueError("Domain '{}' may not be renamed".format(src)) - if dst == "comb": - raise ValueError("Domain '{}' may not be renamed to '{}'".format(src, dst)) - self.domain_map = OrderedDict(domain_map) - - def on_ClockSignal(self, value): - if value.domain in self.domain_map: - return ClockSignal(self.domain_map[value.domain]) - return value - - def on_ResetSignal(self, value): - if value.domain in self.domain_map: - return ResetSignal(self.domain_map[value.domain], - allow_reset_less=value.allow_reset_less) - return value - - def map_domains(self, fragment, new_fragment): - for domain in fragment.iter_domains(): - cd = fragment.domains[domain] - if domain in self.domain_map: - if cd.name == domain: - # Rename the actual ClockDomain object. 
- cd.rename(self.domain_map[domain]) - else: - assert cd.name == self.domain_map[domain] - new_fragment.add_domains(cd) - - def map_drivers(self, fragment, new_fragment): - for domain, signals in fragment.drivers.items(): - if domain in self.domain_map: - domain = self.domain_map[domain] - for signal in signals: - new_fragment.add_driver(self.on_value(signal), domain) - - -class DomainLowerer(FragmentTransformer, ValueTransformer, StatementTransformer): - def __init__(self, domains=None): - self.domains = domains - - def _resolve(self, domain, context): - if domain not in self.domains: - raise DomainError("Signal {!r} refers to nonexistent domain '{}'" - .format(context, domain)) - return self.domains[domain] - - def map_drivers(self, fragment, new_fragment): - for domain, signal in fragment.iter_drivers(): - new_fragment.add_driver(self.on_value(signal), domain) - - def replace_value_src_loc(self, value, new_value): - return not isinstance(value, (ClockSignal, ResetSignal)) - - def on_ClockSignal(self, value): - domain = self._resolve(value.domain, value) - return domain.clk - - def on_ResetSignal(self, value): - domain = self._resolve(value.domain, value) - if domain.rst is None: - if value.allow_reset_less: - return Const(0) - else: - raise DomainError("Signal {!r} refers to reset of reset-less domain '{}'" - .format(value, value.domain)) - return domain.rst - - def _insert_resets(self, fragment): - for domain_name, signals in fragment.drivers.items(): - if domain_name is None: - continue - domain = fragment.domains[domain_name] - if domain.rst is None: - continue - stmts = [signal.eq(Const(signal.reset, signal.width)) - for signal in signals if not signal.reset_less] - fragment.add_statements(Switch(domain.rst, {1: stmts})) - - def on_fragment(self, fragment): - self.domains = fragment.domains - new_fragment = super().on_fragment(fragment) - self._insert_resets(new_fragment) - return new_fragment - - -class SampleDomainInjector(ValueTransformer, 
StatementTransformer): - def __init__(self, domain): - self.domain = domain - - def on_Sample(self, value): - if value.domain is not None: - return value - return Sample(value.value, value.clocks, self.domain) - - def __call__(self, stmts): - return self.on_statement(stmts) - - -class SampleLowerer(FragmentTransformer, ValueTransformer, StatementTransformer): - def __init__(self): - self.initial = None - self.sample_cache = None - self.sample_stmts = None - - def _name_reset(self, value): - if isinstance(value, Const): - return "c${}".format(value.value), value.value - elif isinstance(value, Signal): - return "s${}".format(value.name), value.reset - elif isinstance(value, ClockSignal): - return "clk", 0 - elif isinstance(value, ResetSignal): - return "rst", 1 - elif isinstance(value, Initial): - return "init", 0 # Past(Initial()) produces 0, 1, 0, 0, ... - else: - raise NotImplementedError # :nocov: - - def on_Sample(self, value): - if value in self.sample_cache: - return self.sample_cache[value] - - sampled_value = self.on_value(value.value) - if value.clocks == 0: - sample = sampled_value - else: - assert value.domain is not None - sampled_name, sampled_reset = self._name_reset(value.value) - name = "$sample${}${}${}".format(sampled_name, value.domain, value.clocks) - sample = Signal.like(value.value, name=name, reset_less=True, reset=sampled_reset) - sample.attrs["nmigen.sample_reg"] = True - - prev_sample = self.on_Sample(Sample(sampled_value, value.clocks - 1, value.domain)) - if value.domain not in self.sample_stmts: - self.sample_stmts[value.domain] = [] - self.sample_stmts[value.domain].append(sample.eq(prev_sample)) - - self.sample_cache[value] = sample - return sample - - def on_Initial(self, value): - if self.initial is None: - self.initial = Signal(name="init") - return self.initial - - def map_statements(self, fragment, new_fragment): - self.initial = None - self.sample_cache = ValueDict() - self.sample_stmts = OrderedDict() - 
new_fragment.add_statements(map(self.on_statement, fragment.statements)) - for domain, stmts in self.sample_stmts.items(): - new_fragment.add_statements(stmts) - for stmt in stmts: - new_fragment.add_driver(stmt.lhs, domain) - if self.initial is not None: - new_fragment.add_subfragment(Instance("$initstate", o_Y=self.initial)) - - -class SwitchCleaner(StatementVisitor): - def on_ignore(self, stmt): - return stmt - - on_Assign = on_ignore - on_Assert = on_ignore - on_Assume = on_ignore - on_Cover = on_ignore - - def on_Switch(self, stmt): - cases = OrderedDict((k, self.on_statement(s)) for k, s in stmt.cases.items()) - if any(len(s) for s in cases.values()): - return Switch(stmt.test, cases) - - def on_statements(self, stmts): - stmts = flatten(self.on_statement(stmt) for stmt in stmts) - return _StatementList(stmt for stmt in stmts if stmt is not None) - - -class LHSGroupAnalyzer(StatementVisitor): - def __init__(self): - self.signals = SignalDict() - self.unions = OrderedDict() - - def find(self, signal): - if signal not in self.signals: - self.signals[signal] = len(self.signals) - group = self.signals[signal] - while group in self.unions: - group = self.unions[group] - self.signals[signal] = group - return group - - def unify(self, root, *leaves): - root_group = self.find(root) - for leaf in leaves: - leaf_group = self.find(leaf) - if root_group == leaf_group: - continue - self.unions[leaf_group] = root_group - - def groups(self): - groups = OrderedDict() - for signal in self.signals: - group = self.find(signal) - if group not in groups: - groups[group] = SignalSet() - groups[group].add(signal) - return groups - - def on_Assign(self, stmt): - lhs_signals = stmt._lhs_signals() - if lhs_signals: - self.unify(*stmt._lhs_signals()) - - def on_property(self, stmt): - lhs_signals = stmt._lhs_signals() - if lhs_signals: - self.unify(*stmt._lhs_signals()) - - on_Assert = on_property - on_Assume = on_property - on_Cover = on_property - - def on_Switch(self, stmt): - for 
case_stmts in stmt.cases.values(): - self.on_statements(case_stmts) - - def on_statements(self, stmts): - for stmt in stmts: - self.on_statement(stmt) - - def __call__(self, stmts): - self.on_statements(stmts) - return self.groups() - - -class LHSGroupFilter(SwitchCleaner): - def __init__(self, signals): - self.signals = signals - - def on_Assign(self, stmt): - # The invariant provided by LHSGroupAnalyzer is that all signals that ever appear together - # on LHS are a part of the same group, so it is sufficient to check any of them. - lhs_signals = stmt.lhs._lhs_signals() - if lhs_signals: - any_lhs_signal = next(iter(lhs_signals)) - if any_lhs_signal in self.signals: - return stmt - - def on_property(self, stmt): - any_lhs_signal = next(iter(stmt._lhs_signals())) - if any_lhs_signal in self.signals: - return stmt - - on_Assert = on_property - on_Assume = on_property - on_Cover = on_property - - -class _ControlInserter(FragmentTransformer): - def __init__(self, controls): - self.src_loc = None - if isinstance(controls, Value): - controls = {"sync": controls} - self.controls = OrderedDict(controls) - - def on_fragment(self, fragment): - new_fragment = super().on_fragment(fragment) - for domain, signals in fragment.drivers.items(): - if domain is None or domain not in self.controls: - continue - self._insert_control(new_fragment, domain, signals) - return new_fragment - - def _insert_control(self, fragment, domain, signals): - raise NotImplementedError # :nocov: - - def __call__(self, value, *, src_loc_at=0): - self.src_loc = tracer.get_src_loc(src_loc_at=src_loc_at) - return super().__call__(value, src_loc_at=1 + src_loc_at) - - -class ResetInserter(_ControlInserter): - def _insert_control(self, fragment, domain, signals): - stmts = [s.eq(Const(s.reset, s.width)) for s in signals if not s.reset_less] - fragment.add_statements(Switch(self.controls[domain], {1: stmts}, src_loc=self.src_loc)) - - -class EnableInserter(_ControlInserter): - def _insert_control(self, 
fragment, domain, signals): - stmts = [s.eq(s) for s in signals] - fragment.add_statements(Switch(self.controls[domain], {0: stmts}, src_loc=self.src_loc)) - - def on_fragment(self, fragment): - new_fragment = super().on_fragment(fragment) - if isinstance(new_fragment, Instance) and new_fragment.type in ("$memrd", "$memwr"): - clk_port, clk_dir = new_fragment.named_ports["CLK"] - if isinstance(clk_port, ClockSignal) and clk_port.domain in self.controls: - en_port, en_dir = new_fragment.named_ports["EN"] - en_port = Mux(self.controls[clk_port.domain], en_port, Const(0, len(en_port))) - new_fragment.named_ports["EN"] = en_port, en_dir - return new_fragment +import warnings +warnings.warn("instead of nmigen.hdl.xfrm, use amaranth.hdl.xfrm", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/lib/__init__.py b/nmigen/lib/__init__.py index e69de29..413d959 100644 --- a/nmigen/lib/__init__.py +++ b/nmigen/lib/__init__.py @@ -0,0 +1,6 @@ +from amaranth.lib import * + + +import warnings +warnings.warn("instead of nmigen.lib, use amaranth.lib", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/lib/cdc.py b/nmigen/lib/cdc.py index 9278c8e..bc23f22 100644 --- a/nmigen/lib/cdc.py +++ b/nmigen/lib/cdc.py @@ -1,267 +1,7 @@ -from .. import * +from amaranth.lib.cdc import * +from amaranth.lib.cdc import __all__ -__all__ = ["FFSynchronizer", "AsyncFFSynchronizer", "ResetSynchronizer", "PulseSynchronizer"] - - -def _check_stages(stages): - if not isinstance(stages, int) or stages < 1: - raise TypeError("Synchronization stage count must be a positive integer, not {!r}" - .format(stages)) - if stages < 2: - raise ValueError("Synchronization stage count may not safely be less than 2") - - -class FFSynchronizer(Elaboratable): - """Resynchronise a signal to a different clock domain. - - Consists of a chain of flip-flops. Eliminates metastabilities at the output, but provides - no other guarantee as to the safe domain-crossing of a signal. 
- - Parameters - ---------- - i : Signal(n), in - Signal to be resynchronised. - o : Signal(n), out - Signal connected to synchroniser output. - o_domain : str - Name of output clock domain. - reset : int - Reset value of the flip-flops. On FPGAs, even if ``reset_less`` is True, - the :class:`FFSynchronizer` is still set to this value during initialization. - reset_less : bool - If ``True`` (the default), this :class:`FFSynchronizer` is unaffected by ``o_domain`` - reset. See "Note on Reset" below. - stages : int - Number of synchronization stages between input and output. The lowest safe number is 2, - with higher numbers reducing MTBF further, at the cost of increased latency. - max_input_delay : None or float - Maximum delay from the input signal's clock to the first synchronization stage, in seconds. - If specified and the platform does not support it, elaboration will fail. - - Platform override - ----------------- - Define the ``get_ff_sync`` platform method to override the implementation of - :class:`FFSynchronizer`, e.g. to instantiate library cells directly. - - Note on Reset - ------------- - :class:`FFSynchronizer` is non-resettable by default. Usually this is the safest option; - on FPGAs the :class:`FFSynchronizer` will still be initialized to its ``reset`` value when - the FPGA loads its configuration. - - However, in designs where the value of the :class:`FFSynchronizer` must be valid immediately - after reset, consider setting ``reset_less`` to False if any of the following is true: - - - You are targeting an ASIC, or an FPGA that does not allow arbitrary initial flip-flop states; - - Your design features warm (non-power-on) resets of ``o_domain``, so the one-time - initialization at power on is insufficient; - - Your design features a sequenced reset, and the :class:`FFSynchronizer` must maintain - its reset value until ``o_domain`` reset specifically is deasserted. - - :class:`FFSynchronizer` is reset by the ``o_domain`` reset only. 
- """ - def __init__(self, i, o, *, o_domain="sync", reset=0, reset_less=True, stages=2, - max_input_delay=None): - _check_stages(stages) - - self.i = i - self.o = o - - self._reset = reset - self._reset_less = reset_less - self._o_domain = o_domain - self._stages = stages - - self._max_input_delay = max_input_delay - - def elaborate(self, platform): - if hasattr(platform, "get_ff_sync"): - return platform.get_ff_sync(self) - - if self._max_input_delay is not None: - raise NotImplementedError("Platform '{}' does not support constraining input delay " - "for FFSynchronizer" - .format(type(platform).__name__)) - - m = Module() - flops = [Signal(self.i.shape(), name="stage{}".format(index), - reset=self._reset, reset_less=self._reset_less) - for index in range(self._stages)] - for i, o in zip((self.i, *flops), flops): - m.d[self._o_domain] += o.eq(i) - m.d.comb += self.o.eq(flops[-1]) - return m - - -class AsyncFFSynchronizer(Elaboratable): - """Synchronize deassertion of an asynchronous signal. - - The signal driven by the :class:`AsyncFFSynchronizer` is asserted asynchronously and deasserted - synchronously, eliminating metastability during deassertion. - - This synchronizer is primarily useful for resets and reset-like signals. - - Parameters - ---------- - i : Signal(1), in - Asynchronous input signal, to be synchronized. - o : Signal(1), out - Synchronously released output signal. - o_domain : str - Name of clock domain to synchronize to. - stages : int, >=2 - Number of synchronization stages between input and output. The lowest safe number is 2, - with higher numbers reducing MTBF further, at the cost of increased deassertion latency. - async_edge : str - The edge of the input signal which causes the output to be set. Must be one of "pos" or "neg". - max_input_delay : None or float - Maximum delay from the input signal's clock to the first synchronization stage, in seconds. - If specified and the platform does not support it, elaboration will fail. 
- - Platform override - ----------------- - Define the ``get_async_ff_sync`` platform method to override the implementation of - :class:`AsyncFFSynchronizer`, e.g. to instantiate library cells directly. - """ - def __init__(self, i, o, *, o_domain="sync", stages=2, async_edge="pos", max_input_delay=None): - _check_stages(stages) - - if len(i) != 1: - raise ValueError("AsyncFFSynchronizer input width must be 1, not {}" - .format(len(i))) - if len(o) != 1: - raise ValueError("AsyncFFSynchronizer output width must be 1, not {}" - .format(len(o))) - - if async_edge not in ("pos", "neg"): - raise ValueError("AsyncFFSynchronizer async edge must be one of 'pos' or 'neg', " - "not {!r}" - .format(async_edge)) - - self.i = i - self.o = o - - self._o_domain = o_domain - self._stages = stages - - self._edge = async_edge - - self._max_input_delay = max_input_delay - - def elaborate(self, platform): - if hasattr(platform, "get_async_ff_sync"): - return platform.get_async_ff_sync(self) - - if self._max_input_delay is not None: - raise NotImplementedError("Platform '{}' does not support constraining input delay " - "for AsyncFFSynchronizer" - .format(type(platform).__name__)) - - m = Module() - m.domains += ClockDomain("async_ff", async_reset=True, local=True) - flops = [Signal(1, name="stage{}".format(index), reset=1) - for index in range(self._stages)] - for i, o in zip((0, *flops), flops): - m.d.async_ff += o.eq(i) - - if self._edge == "pos": - m.d.comb += ResetSignal("async_ff").eq(self.i) - else: - m.d.comb += ResetSignal("async_ff").eq(~self.i) - - m.d.comb += [ - ClockSignal("async_ff").eq(ClockSignal(self._o_domain)), - self.o.eq(flops[-1]) - ] - - return m - - -class ResetSynchronizer(Elaboratable): - """Synchronize deassertion of a clock domain reset. - - The reset of the clock domain driven by the :class:`ResetSynchronizer` is asserted - asynchronously and deasserted synchronously, eliminating metastability during deassertion. 
- - The driven clock domain could use a reset that is asserted either synchronously or - asynchronously; a reset is always deasserted synchronously. A domain with an asynchronously - asserted reset is useful if the clock of the domain may be gated, yet the domain still - needs to be reset promptly; otherwise, synchronously asserted reset (the default) should - be used. - - Parameters - ---------- - arst : Signal(1), in - Asynchronous reset signal, to be synchronized. - domain : str - Name of clock domain to reset. - stages : int, >=2 - Number of synchronization stages between input and output. The lowest safe number is 2, - with higher numbers reducing MTBF further, at the cost of increased deassertion latency. - max_input_delay : None or float - Maximum delay from the input signal's clock to the first synchronization stage, in seconds. - If specified and the platform does not support it, elaboration will fail. - - Platform override - ----------------- - Define the ``get_reset_sync`` platform method to override the implementation of - :class:`ResetSynchronizer`, e.g. to instantiate library cells directly. - """ - def __init__(self, arst, *, domain="sync", stages=2, max_input_delay=None): - _check_stages(stages) - - self.arst = arst - - self._domain = domain - self._stages = stages - - self._max_input_delay = max_input_delay - - def elaborate(self, platform): - return AsyncFFSynchronizer(self.arst, ResetSignal(self._domain), o_domain=self._domain, - stages=self._stages, max_input_delay=self._max_input_delay) - - -class PulseSynchronizer(Elaboratable): - """A one-clock pulse on the input produces a one-clock pulse on the output. - - If the output clock is faster than the input clock, then the input may be safely asserted at - 100% duty cycle. Otherwise, if the clock ratio is `n`:1, the input may be asserted at most once - in every `n` input clocks, else pulses may be dropped. Other than this there is no constraint - on the ratio of input and output clock frequency. 
- - Parameters - ---------- - i_domain : str - Name of input clock domain. - o_domain : str - Name of output clock domain. - stages : int, >=2 - Number of synchronization stages between input and output. The lowest safe number is 2, - with higher numbers reducing MTBF further, at the cost of increased deassertion latency. - """ - def __init__(self, i_domain, o_domain, *, stages=2): - _check_stages(stages) - - self.i = Signal() - self.o = Signal() - - self._i_domain = i_domain - self._o_domain = o_domain - self._stages = stages - - def elaborate(self, platform): - m = Module() - - i_toggle = Signal() - o_toggle = Signal() - r_toggle = Signal() - ff_sync = m.submodules.ff_sync = \ - FFSynchronizer(i_toggle, o_toggle, o_domain=self._o_domain, stages=self._stages) - - m.d[self._i_domain] += i_toggle.eq(i_toggle ^ self.i) - m.d[self._o_domain] += r_toggle.eq(o_toggle) - m.d.comb += self.o.eq(o_toggle ^ r_toggle) - - return m +import warnings +warnings.warn("instead of nmigen.lib.cdc, use amaranth.lib.cdc", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/lib/coding.py b/nmigen/lib/coding.py index 5abfbd7..07a6fbb 100644 --- a/nmigen/lib/coding.py +++ b/nmigen/lib/coding.py @@ -1,186 +1,7 @@ -"""Encoders and decoders between binary and one-hot representation.""" - -from .. import * +from amaranth.lib.coding import * +from amaranth.lib.coding import __all__ -__all__ = [ - "Encoder", "Decoder", - "PriorityEncoder", "PriorityDecoder", - "GrayEncoder", "GrayDecoder", -] - - -class Encoder(Elaboratable): - """Encode one-hot to binary. - - If one bit in ``i`` is asserted, ``n`` is low and ``o`` indicates the asserted bit. - Otherwise, ``n`` is high and ``o`` is ``0``. - - Parameters - ---------- - width : int - Bit width of the input - - Attributes - ---------- - i : Signal(width), in - One-hot input. - o : Signal(range(width)), out - Encoded binary. - n : Signal, out - Invalid: either none or multiple input bits are asserted. 
- """ - def __init__(self, width): - self.width = width - - self.i = Signal(width) - self.o = Signal(range(width)) - self.n = Signal() - - def elaborate(self, platform): - m = Module() - with m.Switch(self.i): - for j in range(self.width): - with m.Case(1 << j): - m.d.comb += self.o.eq(j) - with m.Case(): - m.d.comb += self.n.eq(1) - return m - - -class PriorityEncoder(Elaboratable): - """Priority encode requests to binary. - - If any bit in ``i`` is asserted, ``n`` is low and ``o`` indicates the least significant - asserted bit. - Otherwise, ``n`` is high and ``o`` is ``0``. - - Parameters - ---------- - width : int - Bit width of the input. - - Attributes - ---------- - i : Signal(width), in - Input requests. - o : Signal(range(width)), out - Encoded binary. - n : Signal, out - Invalid: no input bits are asserted. - """ - def __init__(self, width): - self.width = width - - self.i = Signal(width) - self.o = Signal(range(width)) - self.n = Signal() - - def elaborate(self, platform): - m = Module() - for j in reversed(range(self.width)): - with m.If(self.i[j]): - m.d.comb += self.o.eq(j) - m.d.comb += self.n.eq(self.i == 0) - return m - - -class Decoder(Elaboratable): - """Decode binary to one-hot. - - If ``n`` is low, only the ``i``th bit in ``o`` is asserted. - If ``n`` is high, ``o`` is ``0``. - - Parameters - ---------- - width : int - Bit width of the output. - - Attributes - ---------- - i : Signal(range(width)), in - Input binary. - o : Signal(width), out - Decoded one-hot. - n : Signal, in - Invalid, no output bits are to be asserted. - """ - def __init__(self, width): - self.width = width - - self.i = Signal(range(width)) - self.n = Signal() - self.o = Signal(width) - - def elaborate(self, platform): - m = Module() - with m.Switch(self.i): - for j in range(len(self.o)): - with m.Case(j): - m.d.comb += self.o.eq(1 << j) - with m.If(self.n): - m.d.comb += self.o.eq(0) - return m - - -class PriorityDecoder(Decoder): - """Decode binary to priority request. 
- - Identical to :class:`Decoder`. - """ - - -class GrayEncoder(Elaboratable): - """Encode binary to Gray code. - - Parameters - ---------- - width : int - Bit width. - - Attributes - ---------- - i : Signal(width), in - Input natural binary. - o : Signal(width), out - Encoded Gray code. - """ - def __init__(self, width): - self.width = width - - self.i = Signal(width) - self.o = Signal(width) - - def elaborate(self, platform): - m = Module() - m.d.comb += self.o.eq(self.i ^ self.i[1:]) - return m - - -class GrayDecoder(Elaboratable): - """Decode Gray code to binary. - - Parameters - ---------- - width : int - Bit width. - - Attributes - ---------- - i : Signal(width), in - Input Gray code. - o : Signal(width), out - Decoded natural binary. - """ - def __init__(self, width): - self.width = width - - self.i = Signal(width) - self.o = Signal(width) - - def elaborate(self, platform): - m = Module() - m.d.comb += self.o[-1].eq(self.i[-1]) - for i in reversed(range(self.width - 1)): - m.d.comb += self.o[i].eq(self.o[i + 1] ^ self.i[i]) - return m +import warnings +warnings.warn("instead of nmigen.lib.coding, use amaranth.lib.coding", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/lib/fifo.py b/nmigen/lib/fifo.py index defe261..ea0d578 100644 --- a/nmigen/lib/fifo.py +++ b/nmigen/lib/fifo.py @@ -1,529 +1,7 @@ -"""First-in first-out queues.""" +from amaranth.lib.fifo import * +from amaranth.lib.fifo import __all__ -from .. import * -from ..asserts import * -from .._utils import log2_int -from .coding import GrayEncoder, GrayDecoder -from .cdc import FFSynchronizer, AsyncFFSynchronizer - -__all__ = ["FIFOInterface", "SyncFIFO", "SyncFIFOBuffered", "AsyncFIFO", "AsyncFIFOBuffered"] - - -class FIFOInterface: - _doc_template = """ - {description} - - Parameters - ---------- - width : int - Bit width of data entries. - depth : int - Depth of the queue. If zero, the FIFO cannot be read from or written to. 
- {parameters} - - Attributes - ---------- - {attributes} - w_data : in, width - Input data. - w_rdy : out - Asserted if there is space in the queue, i.e. ``w_en`` can be asserted to write - a new entry. - w_en : in - Write strobe. Latches ``w_data`` into the queue. Does nothing if ``w_rdy`` is not asserted. - w_level : out - Number of unread entries. - {w_attributes} - r_data : out, width - Output data. {r_data_valid} - r_rdy : out - Asserted if there is an entry in the queue, i.e. ``r_en`` can be asserted to read - an existing entry. - r_en : in - Read strobe. Makes the next entry (if any) available on ``r_data`` at the next cycle. - Does nothing if ``r_rdy`` is not asserted. - r_level : out - Number of unread entries. - {r_attributes} - """ - - __doc__ = _doc_template.format(description=""" - Data written to the input interface (``w_data``, ``w_rdy``, ``w_en``) is buffered and can be - read at the output interface (``r_data``, ``r_rdy``, ``r_en`). The data entry written first - to the input also appears first on the output. - """, - parameters="", - r_data_valid="The conditions in which ``r_data`` is valid depends on the type of the queue.", - attributes=""" - fwft : bool - First-word fallthrough. If set, when ``r_rdy`` rises, the first entry is already - available, i.e. ``r_data`` is valid. Otherwise, after ``r_rdy`` rises, it is necessary - to strobe ``r_en`` for ``r_data`` to become valid. 
- """.strip(), - w_attributes="", - r_attributes="") - - def __init__(self, *, width, depth, fwft): - if not isinstance(width, int) or width < 0: - raise TypeError("FIFO width must be a non-negative integer, not {!r}" - .format(width)) - if not isinstance(depth, int) or depth < 0: - raise TypeError("FIFO depth must be a non-negative integer, not {!r}" - .format(depth)) - self.width = width - self.depth = depth - self.fwft = fwft - - self.w_data = Signal(width, reset_less=True) - self.w_rdy = Signal() # writable; not full - self.w_en = Signal() - self.w_level = Signal(range(depth + 1)) - - self.r_data = Signal(width, reset_less=True) - self.r_rdy = Signal() # readable; not empty - self.r_en = Signal() - self.r_level = Signal(range(depth + 1)) - - -def _incr(signal, modulo): - if modulo == 2 ** len(signal): - return signal + 1 - else: - return Mux(signal == modulo - 1, 0, signal + 1) - - -class SyncFIFO(Elaboratable, FIFOInterface): - __doc__ = FIFOInterface._doc_template.format( - description=""" - Synchronous first in, first out queue. - - Read and write interfaces are accessed from the same clock domain. If different clock domains - are needed, use :class:`AsyncFIFO`. - """.strip(), - parameters=""" - fwft : bool - First-word fallthrough. If set, when the queue is empty and an entry is written into it, - that entry becomes available on the output on the same clock cycle. Otherwise, it is - necessary to assert ``r_en`` for ``r_data`` to become valid. - """.strip(), - r_data_valid=""" - For FWFT queues, valid if ``r_rdy`` is asserted. For non-FWFT queues, valid on the next - cycle after ``r_rdy`` and ``r_en`` have been asserted. 
- """.strip(), - attributes="", - r_attributes="", - w_attributes="") - - def __init__(self, *, width, depth, fwft=True): - super().__init__(width=width, depth=depth, fwft=fwft) - - self.level = Signal(range(depth + 1)) - - def elaborate(self, platform): - m = Module() - if self.depth == 0: - m.d.comb += [ - self.w_rdy.eq(0), - self.r_rdy.eq(0), - ] - return m - - m.d.comb += [ - self.w_rdy.eq(self.level != self.depth), - self.r_rdy.eq(self.level != 0), - self.w_level.eq(self.level), - self.r_level.eq(self.level), - ] - - do_read = self.r_rdy & self.r_en - do_write = self.w_rdy & self.w_en - - storage = Memory(width=self.width, depth=self.depth) - w_port = m.submodules.w_port = storage.write_port() - r_port = m.submodules.r_port = storage.read_port( - domain="comb" if self.fwft else "sync", transparent=self.fwft) - produce = Signal(range(self.depth)) - consume = Signal(range(self.depth)) - - m.d.comb += [ - w_port.addr.eq(produce), - w_port.data.eq(self.w_data), - w_port.en.eq(self.w_en & self.w_rdy), - ] - with m.If(do_write): - m.d.sync += produce.eq(_incr(produce, self.depth)) - - m.d.comb += [ - r_port.addr.eq(consume), - self.r_data.eq(r_port.data), - ] - if not self.fwft: - m.d.comb += r_port.en.eq(self.r_en) - with m.If(do_read): - m.d.sync += consume.eq(_incr(consume, self.depth)) - - with m.If(do_write & ~do_read): - m.d.sync += self.level.eq(self.level + 1) - with m.If(do_read & ~do_write): - m.d.sync += self.level.eq(self.level - 1) - - if platform == "formal": - # TODO: move this logic to SymbiYosys - with m.If(Initial()): - m.d.comb += [ - Assume(produce < self.depth), - Assume(consume < self.depth), - ] - with m.If(produce == consume): - m.d.comb += Assume((self.level == 0) | (self.level == self.depth)) - with m.If(produce > consume): - m.d.comb += Assume(self.level == (produce - consume)) - with m.If(produce < consume): - m.d.comb += Assume(self.level == (self.depth + produce - consume)) - with m.Else(): - m.d.comb += [ - Assert(produce < 
self.depth), - Assert(consume < self.depth), - ] - with m.If(produce == consume): - m.d.comb += Assert((self.level == 0) | (self.level == self.depth)) - with m.If(produce > consume): - m.d.comb += Assert(self.level == (produce - consume)) - with m.If(produce < consume): - m.d.comb += Assert(self.level == (self.depth + produce - consume)) - - return m - - -class SyncFIFOBuffered(Elaboratable, FIFOInterface): - __doc__ = FIFOInterface._doc_template.format( - description=""" - Buffered synchronous first in, first out queue. - - This queue's interface is identical to :class:`SyncFIFO` configured as ``fwft=True``, but it - does not use asynchronous memory reads, which are incompatible with FPGA block RAMs. - - In exchange, the latency between an entry being written to an empty queue and that entry - becoming available on the output is increased by one cycle compared to :class:`SyncFIFO`. - """.strip(), - parameters=""" - fwft : bool - Always set. - """.strip(), - attributes="", - r_data_valid="Valid if ``r_rdy`` is asserted.", - r_attributes=""" - level : out - Number of unread entries. - """.strip(), - w_attributes="") - - def __init__(self, *, width, depth): - super().__init__(width=width, depth=depth, fwft=True) - - self.level = Signal(range(depth + 1)) - - def elaborate(self, platform): - m = Module() - if self.depth == 0: - m.d.comb += [ - self.w_rdy.eq(0), - self.r_rdy.eq(0), - ] - return m - - # Effectively, this queue treats the output register of the non-FWFT inner queue as - # an additional storage element. 
- m.submodules.unbuffered = fifo = SyncFIFO(width=self.width, depth=self.depth - 1, - fwft=False) - - m.d.comb += [ - fifo.w_data.eq(self.w_data), - fifo.w_en.eq(self.w_en), - self.w_rdy.eq(fifo.w_rdy), - ] - - m.d.comb += [ - self.r_data.eq(fifo.r_data), - fifo.r_en.eq(fifo.r_rdy & (~self.r_rdy | self.r_en)), - ] - with m.If(fifo.r_en): - m.d.sync += self.r_rdy.eq(1) - with m.Elif(self.r_en): - m.d.sync += self.r_rdy.eq(0) - - m.d.comb += [ - self.level.eq(fifo.level + self.r_rdy), - self.w_level.eq(self.level), - self.r_level.eq(self.level), - ] - - return m - - -class AsyncFIFO(Elaboratable, FIFOInterface): - __doc__ = FIFOInterface._doc_template.format( - description=""" - Asynchronous first in, first out queue. - - Read and write interfaces are accessed from different clock domains, which can be set when - constructing the FIFO. - - :class:`AsyncFIFO` can be reset from the write clock domain. When the write domain reset is - asserted, the FIFO becomes empty. When the read domain is reset, data remains in the FIFO - the - read domain logic should correctly handle this case. - - :class:`AsyncFIFO` only supports power of 2 depths. Unless ``exact_depth`` is specified, - the ``depth`` parameter is rounded up to the next power of 2. - """.strip(), - parameters=""" - r_domain : str - Read clock domain. - w_domain : str - Write clock domain. - """.strip(), - attributes=""" - fwft : bool - Always set. - """.strip(), - r_data_valid="Valid if ``r_rdy`` is asserted.", - r_attributes=""" - r_rst : Signal, out - Asserted while the FIFO is being reset by the write-domain reset (for at least one - read-domain clock cycle). 
- """.strip(), - w_attributes="") - - def __init__(self, *, width, depth, r_domain="read", w_domain="write", exact_depth=False): - if depth != 0: - try: - depth_bits = log2_int(depth, need_pow2=exact_depth) - depth = 1 << depth_bits - except ValueError: - raise ValueError("AsyncFIFO only supports depths that are powers of 2; requested " - "exact depth {} is not" - .format(depth)) from None - else: - depth_bits = 0 - super().__init__(width=width, depth=depth, fwft=True) - - self.r_rst = Signal() - self._r_domain = r_domain - self._w_domain = w_domain - self._ctr_bits = depth_bits + 1 - - def elaborate(self, platform): - m = Module() - if self.depth == 0: - m.d.comb += [ - self.w_rdy.eq(0), - self.r_rdy.eq(0), - ] - return m - - # The design of this queue is the "style #2" from Clifford E. Cummings' paper "Simulation - # and Synthesis Techniques for Asynchronous FIFO Design": - # http://www.sunburst-design.com/papers/CummingsSNUG2002SJ_FIFO1.pdf - - do_write = self.w_rdy & self.w_en - do_read = self.r_rdy & self.r_en - - # TODO: extract this pattern into lib.cdc.GrayCounter - produce_w_bin = Signal(self._ctr_bits) - produce_w_nxt = Signal(self._ctr_bits) - m.d.comb += produce_w_nxt.eq(produce_w_bin + do_write) - m.d[self._w_domain] += produce_w_bin.eq(produce_w_nxt) - - # Note: Both read-domain counters must be reset_less (see comments below) - consume_r_bin = Signal(self._ctr_bits, reset_less=True) - consume_r_nxt = Signal(self._ctr_bits) - m.d.comb += consume_r_nxt.eq(consume_r_bin + do_read) - m.d[self._r_domain] += consume_r_bin.eq(consume_r_nxt) - - produce_w_gry = Signal(self._ctr_bits) - produce_r_gry = Signal(self._ctr_bits) - produce_enc = m.submodules.produce_enc = \ - GrayEncoder(self._ctr_bits) - produce_cdc = m.submodules.produce_cdc = \ - FFSynchronizer(produce_w_gry, produce_r_gry, o_domain=self._r_domain) - m.d.comb += produce_enc.i.eq(produce_w_nxt), - m.d[self._w_domain] += produce_w_gry.eq(produce_enc.o) - - consume_r_gry = Signal(self._ctr_bits, 
reset_less=True) - consume_w_gry = Signal(self._ctr_bits) - consume_enc = m.submodules.consume_enc = \ - GrayEncoder(self._ctr_bits) - consume_cdc = m.submodules.consume_cdc = \ - FFSynchronizer(consume_r_gry, consume_w_gry, o_domain=self._w_domain) - m.d.comb += consume_enc.i.eq(consume_r_nxt) - m.d[self._r_domain] += consume_r_gry.eq(consume_enc.o) - - consume_w_bin = Signal(self._ctr_bits) - consume_dec = m.submodules.consume_dec = \ - GrayDecoder(self._ctr_bits) - m.d.comb += consume_dec.i.eq(consume_w_gry), - m.d[self._w_domain] += consume_w_bin.eq(consume_dec.o) - - produce_r_bin = Signal(self._ctr_bits) - produce_dec = m.submodules.produce_dec = \ - GrayDecoder(self._ctr_bits) - m.d.comb += produce_dec.i.eq(produce_r_gry), - m.d.comb += produce_r_bin.eq(produce_dec.o) - - w_full = Signal() - r_empty = Signal() - m.d.comb += [ - w_full.eq((produce_w_gry[-1] != consume_w_gry[-1]) & - (produce_w_gry[-2] != consume_w_gry[-2]) & - (produce_w_gry[:-2] == consume_w_gry[:-2])), - r_empty.eq(consume_r_gry == produce_r_gry), - ] - - m.d[self._w_domain] += self.w_level.eq((produce_w_bin - consume_w_bin)) - m.d.comb += self.r_level.eq((produce_r_bin - consume_r_bin)) - - storage = Memory(width=self.width, depth=self.depth) - w_port = m.submodules.w_port = storage.write_port(domain=self._w_domain) - r_port = m.submodules.r_port = storage.read_port (domain=self._r_domain, - transparent=False) - m.d.comb += [ - w_port.addr.eq(produce_w_bin[:-1]), - w_port.data.eq(self.w_data), - w_port.en.eq(do_write), - self.w_rdy.eq(~w_full), - ] - m.d.comb += [ - r_port.addr.eq(consume_r_nxt[:-1]), - self.r_data.eq(r_port.data), - r_port.en.eq(1), - self.r_rdy.eq(~r_empty), - ] - - # Reset handling to maintain FIFO and CDC invariants in the presence of a write-domain - # reset. - # There is a CDC hazard associated with resetting an async FIFO - Gray code counters which - # are reset to 0 violate their Gray code invariant. 
One way to handle this is to ensure - # that both sides of the FIFO are asynchronously reset by the same signal. We adopt a - # slight variation on this approach - reset control rests entirely with the write domain. - # The write domain's reset signal is used to asynchronously reset the read domain's - # counters and force the FIFO to be empty when the write domain's reset is asserted. - # This requires the two read domain counters to be marked as "reset_less", as they are - # reset through another mechanism. See https://github.com/nmigen/nmigen/issues/181 for the - # full discussion. - w_rst = ResetSignal(domain=self._w_domain, allow_reset_less=True) - r_rst = Signal() - - # Async-set-sync-release synchronizer avoids CDC hazards - rst_cdc = m.submodules.rst_cdc = \ - AsyncFFSynchronizer(w_rst, r_rst, o_domain=self._r_domain) - - # Decode Gray code counter synchronized from write domain to overwrite binary - # counter in read domain. - rst_dec = m.submodules.rst_dec = \ - GrayDecoder(self._ctr_bits) - m.d.comb += rst_dec.i.eq(produce_r_gry) - with m.If(r_rst): - m.d.comb += r_empty.eq(1) - m.d[self._r_domain] += consume_r_gry.eq(produce_r_gry) - m.d[self._r_domain] += consume_r_bin.eq(rst_dec.o) - m.d[self._r_domain] += self.r_rst.eq(1) - with m.Else(): - m.d[self._r_domain] += self.r_rst.eq(0) - - if platform == "formal": - with m.If(Initial()): - m.d.comb += Assume(produce_w_gry == (produce_w_bin ^ produce_w_bin[1:])) - m.d.comb += Assume(consume_r_gry == (consume_r_bin ^ consume_r_bin[1:])) - - return m - - -class AsyncFIFOBuffered(Elaboratable, FIFOInterface): - __doc__ = FIFOInterface._doc_template.format( - description=""" - Buffered asynchronous first in, first out queue. - - Read and write interfaces are accessed from different clock domains, which can be set when - constructing the FIFO. - - :class:`AsyncFIFOBuffered` only supports power of 2 plus one depths. 
Unless ``exact_depth`` - is specified, the ``depth`` parameter is rounded up to the next power of 2 plus one. - (The output buffer acts as an additional queue element.) - - This queue's interface is identical to :class:`AsyncFIFO`, but it has an additional register - on the output, improving timing in case of block RAM that has large clock-to-output delay. - - In exchange, the latency between an entry being written to an empty queue and that entry - becoming available on the output is increased by one cycle compared to :class:`AsyncFIFO`. - """.strip(), - parameters=""" - r_domain : str - Read clock domain. - w_domain : str - Write clock domain. - """.strip(), - attributes=""" - fwft : bool - Always set. - """.strip(), - r_data_valid="Valid if ``r_rdy`` is asserted.", - r_attributes=""" - r_rst : Signal, out - Asserted while the FIFO is being reset by the write-domain reset (for at least one - read-domain clock cycle). - """.strip(), - w_attributes="") - - def __init__(self, *, width, depth, r_domain="read", w_domain="write", exact_depth=False): - if depth != 0: - try: - depth_bits = log2_int(max(0, depth - 1), need_pow2=exact_depth) - depth = (1 << depth_bits) + 1 - except ValueError: - raise ValueError("AsyncFIFOBuffered only supports depths that are one higher " - "than powers of 2; requested exact depth {} is not" - .format(depth)) from None - super().__init__(width=width, depth=depth, fwft=True) - - self.r_rst = Signal() - self._r_domain = r_domain - self._w_domain = w_domain - - def elaborate(self, platform): - m = Module() - if self.depth == 0: - m.d.comb += [ - self.w_rdy.eq(0), - self.r_rdy.eq(0), - ] - return m - - m.submodules.unbuffered = fifo = AsyncFIFO(width=self.width, depth=self.depth - 1, - r_domain=self._r_domain, w_domain=self._w_domain) - - m.d.comb += [ - fifo.w_data.eq(self.w_data), - self.w_rdy.eq(fifo.w_rdy), - fifo.w_en.eq(self.w_en), - ] - - r_consume_buffered = Signal() - m.d.comb += r_consume_buffered.eq((self.r_rdy - self.r_en) & 
self.r_rdy) - m.d[self._r_domain] += self.r_level.eq(fifo.r_level + r_consume_buffered) - - w_consume_buffered = Signal() - m.submodules.consume_buffered_cdc = FFSynchronizer(r_consume_buffered, w_consume_buffered, o_domain=self._w_domain, stages=4) - m.d.comb += self.w_level.eq(fifo.w_level + w_consume_buffered) - - with m.If(self.r_en | ~self.r_rdy): - m.d[self._r_domain] += [ - self.r_data.eq(fifo.r_data), - self.r_rdy.eq(fifo.r_rdy), - self.r_rst.eq(fifo.r_rst), - ] - m.d.comb += [ - fifo.r_en.eq(1) - ] - - return m +import warnings +warnings.warn("instead of nmigen.lib.fifo, use amaranth.lib.fifo", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/lib/io.py b/nmigen/lib/io.py index 9776eff..7cc0e77 100644 --- a/nmigen/lib/io.py +++ b/nmigen/lib/io.py @@ -1,116 +1,7 @@ -from .. import * -from ..hdl.rec import * +from amaranth.lib.io import * +from amaranth.lib.io import __all__ -__all__ = ["pin_layout", "Pin"] - - -def pin_layout(width, dir, xdr=0): - """ - Layout of the platform interface of a pin or several pins, which may be used inside - user-defined records. - - See :class:`Pin` for details. 
- """ - if not isinstance(width, int) or width < 1: - raise TypeError("Width must be a positive integer, not {!r}" - .format(width)) - if dir not in ("i", "o", "oe", "io"): - raise TypeError("Direction must be one of \"i\", \"o\", \"io\", or \"oe\", not {!r}""" - .format(dir)) - if not isinstance(xdr, int) or xdr < 0: - raise TypeError("Gearing ratio must be a non-negative integer, not {!r}" - .format(xdr)) - - fields = [] - if dir in ("i", "io"): - if xdr > 0: - fields.append(("i_clk", 1)) - if xdr > 2: - fields.append(("i_fclk", 1)) - if xdr in (0, 1): - fields.append(("i", width)) - else: - for n in range(xdr): - fields.append(("i{}".format(n), width)) - if dir in ("o", "oe", "io"): - if xdr > 0: - fields.append(("o_clk", 1)) - if xdr > 2: - fields.append(("o_fclk", 1)) - if xdr in (0, 1): - fields.append(("o", width)) - else: - for n in range(xdr): - fields.append(("o{}".format(n), width)) - if dir in ("oe", "io"): - fields.append(("oe", 1)) - return Layout(fields) - - -class Pin(Record): - """ - An interface to an I/O buffer or a group of them that provides uniform access to input, output, - or tristate buffers that may include a 1:n gearbox. (A 1:2 gearbox is typically called "DDR".) - - A :class:`Pin` is identical to a :class:`Record` that uses the corresponding :meth:`pin_layout` - except that it allos accessing the parameters like ``width`` as attributes. It is legal to use - a plain :class:`Record` anywhere a :class:`Pin` is used, provided that these attributes are - not necessary. - - Parameters - ---------- - width : int - Width of the ``i``/``iN`` and ``o``/``oN`` signals. - dir : ``"i"``, ``"o"``, ``"io"``, ``"oe"`` - Direction of the buffers. If ``"i"`` is specified, only the ``i``/``iN`` signals are - present. If ``"o"`` is specified, only the ``o``/``oN`` signals are present. If ``"oe"`` is - specified, the ``o``/``oN`` signals are present, and an ``oe`` signal is present. 
- If ``"io"`` is specified, both the ``i``/``iN`` and ``o``/``oN`` signals are present, and - an ``oe`` signal is present. - xdr : int - Gearbox ratio. If equal to 0, the I/O buffer is combinatorial, and only ``i``/``o`` - signals are present. If equal to 1, the I/O buffer is SDR, and only ``i``/``o`` signals are - present. If greater than 1, the I/O buffer includes a gearbox, and ``iN``/``oN`` signals - are present instead, where ``N in range(0, N)``. For example, if ``xdr=2``, the I/O buffer - is DDR; the signal ``i0`` reflects the value at the rising edge, and the signal ``i1`` - reflects the value at the falling edge. - name : str - Name of the underlying record. - - Attributes - ---------- - i_clk: - I/O buffer input clock. Synchronizes `i*`. Present if ``xdr`` is nonzero. - i_fclk: - I/O buffer input fast clock. Synchronizes `i*` on higer gearbox ratios. Present if ``xdr`` - is greater than 2. - i : Signal, out - I/O buffer input, without gearing. Present if ``dir="i"`` or ``dir="io"``, and ``xdr`` is - equal to 0 or 1. - i0, i1, ... : Signal, out - I/O buffer inputs, with gearing. Present if ``dir="i"`` or ``dir="io"``, and ``xdr`` is - greater than 1. - o_clk: - I/O buffer output clock. Synchronizes `o*`, including `oe`. Present if ``xdr`` is nonzero. - o_fclk: - I/O buffer output fast clock. Synchronizes `o*` on higher gearbox ratios. Present if - ``xdr`` is greater than 2. - o : Signal, in - I/O buffer output, without gearing. Present if ``dir="o"`` or ``dir="io"``, and ``xdr`` is - equal to 0 or 1. - o0, o1, ... : Signal, in - I/O buffer outputs, with gearing. Present if ``dir="o"`` or ``dir="io"``, and ``xdr`` is - greater than 1. - oe : Signal, in - I/O buffer output enable. Present if ``dir="io"`` or ``dir="oe"``. Buffers generally - cannot change direction more than once per cycle, so at most one output enable signal - is present. 
- """ - def __init__(self, width, dir, *, xdr=0, name=None, src_loc_at=0): - self.width = width - self.dir = dir - self.xdr = xdr - - super().__init__(pin_layout(self.width, self.dir, self.xdr), - name=name, src_loc_at=src_loc_at + 1) +import warnings +warnings.warn("instead of nmigen.lib.io, use amaranth.lib.io", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/lib/scheduler.py b/nmigen/lib/scheduler.py index 47bc38f..a8ead3b 100644 --- a/nmigen/lib/scheduler.py +++ b/nmigen/lib/scheduler.py @@ -1,60 +1,7 @@ -from .. import * +from amaranth.lib.scheduler import * +from amaranth.lib.scheduler import __all__ -__all__ = ["RoundRobin"] - - -class RoundRobin(Elaboratable): - """Round-robin scheduler. - - For a given set of requests, the round-robin scheduler will - grant one request. Once it grants a request, if any other - requests are active, it grants the next active request with - a greater number, restarting from zero once it reaches the - highest one. - - Use :class:`EnableInserter` to control when the scheduler - is updated. - - Parameters - ---------- - count : int - Number of requests. - - Attributes - ---------- - requests : Signal(count), in - Set of requests. - grant : Signal(range(count)), out - Number of the granted request. Does not change if there are no - active requests. - valid : Signal(), out - Asserted if grant corresponds to an active request. Deasserted - otherwise, i.e. if no requests are active. 
- """ - def __init__(self, *, count): - if not isinstance(count, int) or count < 0: - raise ValueError("Count must be a non-negative integer, not {!r}" - .format(count)) - self.count = count - - self.requests = Signal(count) - self.grant = Signal(range(count)) - self.valid = Signal() - - def elaborate(self, platform): - m = Module() - - with m.Switch(self.grant): - for i in range(self.count): - with m.Case(i): - for pred in reversed(range(i)): - with m.If(self.requests[pred]): - m.d.sync += self.grant.eq(pred) - for succ in reversed(range(i + 1, self.count)): - with m.If(self.requests[succ]): - m.d.sync += self.grant.eq(succ) - - m.d.sync += self.valid.eq(self.requests.any()) - - return m +import warnings +warnings.warn("instead of nmigen.lib.scheduler, use amaranth.lib.scheduler", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/rpc.py b/nmigen/rpc.py index f3455e6..aa1fbec 100644 --- a/nmigen/rpc.py +++ b/nmigen/rpc.py @@ -1,111 +1,7 @@ -import sys -import json -import argparse -import importlib - -from .hdl import Signal, Record, Elaboratable -from .back import rtlil +from amaranth.rpc import * +from amaranth.rpc import __all__ -__all__ = ["main"] - - -def _collect_modules(names): - modules = {} - for name in names: - py_module_name, py_class_name = name.rsplit(".", 1) - py_module = importlib.import_module(py_module_name) - if py_class_name == "*": - for py_class_name in py_module.__all__: - py_class = py_module.__dict__[py_class_name] - if not issubclass(py_class, Elaboratable): - continue - modules["{}.{}".format(py_module_name, py_class_name)] = py_class - else: - py_class = py_module.__dict__[py_class_name] - if not isinstance(py_class, type) or not issubclass(py_class, Elaboratable): - raise TypeError("{}.{} is not a class inheriting from Elaboratable" - .format(py_module_name, py_class_name)) - modules[name] = py_class - return modules - - -def _serve_yosys(modules): - while True: - request_json = sys.stdin.readline() - if not request_json: break - 
request = json.loads(request_json) - - if request["method"] == "modules": - response = {"modules": list(modules.keys())} - - elif request["method"] == "derive": - module_name = request["module"] - - args, kwargs = [], {} - for parameter_name, parameter in request["parameters"].items(): - if parameter["type"] == "unsigned": - parameter_value = int(parameter["value"], 2) - elif parameter["type"] == "signed": - width = len(parameter["value"]) - parameter_value = int(parameter["value"], 2) - if parameter_value & (1 << (width - 1)): - parameter_value = -((1 << width) - parameter_value) - elif parameter["type"] == "string": - parameter_value = parameter["value"] - elif parameter["type"] == "real": - parameter_value = float(parameter["value"]) - else: - raise NotImplementedError("Unrecognized parameter type {}" - .format(parameter_name)) - if parameter_name.startswith("$"): - index = int(parameter_name[1:]) - while len(args) < index: - args.append(None) - args[index] = parameter_value - if parameter_name.startswith("\\"): - kwargs[parameter_name[1:]] = parameter_value - - try: - elaboratable = modules[module_name](*args, **kwargs) - ports = [] - # By convention, any public attribute that is a Signal or a Record is - # considered a port. 
- for port_name, port in vars(elaboratable).items(): - if not port_name.startswith("_") and isinstance(port, (Signal, Record)): - ports += port._lhs_signals() - rtlil_text = rtlil.convert(elaboratable, name=module_name, ports=ports) - response = {"frontend": "ilang", "source": rtlil_text} - except Exception as error: - response = {"error": "{}: {}".format(type(error).__name__, str(error))} - - else: - return {"error": "Unrecognized method {!r}".format(request["method"])} - - sys.stdout.write(json.dumps(response)) - sys.stdout.write("\n") - sys.stdout.flush() - - -def main(): - parser = argparse.ArgumentParser(description=r""" - The nMigen RPC server allows a HDL synthesis program to request an nMigen module to - be elaborated on demand using the parameters it provides. For example, using Yosys together - with the nMigen RPC server allows instantiating parametric nMigen modules directly - from Verilog. - """) - def add_modules_arg(parser): - parser.add_argument("modules", metavar="MODULE", type=str, nargs="+", - help="import and provide MODULES") - protocols = parser.add_subparsers(metavar="PROTOCOL", dest="protocol", required=True) - protocol_yosys = protocols.add_parser("yosys", help="use Yosys JSON-based RPC protocol") - add_modules_arg(protocol_yosys) - - args = parser.parse_args() - modules = _collect_modules(args.modules) - if args.protocol == "yosys": - _serve_yosys(modules) - - -if __name__ == "__main__": - main() +import warnings +warnings.warn("instead of nmigen.rpc, use amaranth.rpc", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/sim/__init__.py b/nmigen/sim/__init__.py index c239c52..7335701 100644 --- a/nmigen/sim/__init__.py +++ b/nmigen/sim/__init__.py @@ -1,4 +1,7 @@ -from .core import * +from amaranth.sim import * +from amaranth.sim import __all__ -__all__ = ["Settle", "Delay", "Tick", "Passive", "Active", "Simulator"] +import warnings +warnings.warn("instead of nmigen.sim, use amaranth.sim", + DeprecationWarning, stacklevel=2) diff --git 
a/nmigen/sim/core.py b/nmigen/sim/core.py index 886cea1..fb428e5 100644 --- a/nmigen/sim/core.py +++ b/nmigen/sim/core.py @@ -1,206 +1,7 @@ -import inspect - -from .._utils import deprecated -from ..hdl.cd import * -from ..hdl.ir import * -from ._base import BaseEngine +from amaranth.sim.core import * +from amaranth.sim.core import __all__ -__all__ = ["Settle", "Delay", "Tick", "Passive", "Active", "Simulator"] - - -class Command: - pass - - -class Settle(Command): - def __repr__(self): - return "(settle)" - - -class Delay(Command): - def __init__(self, interval=None): - self.interval = None if interval is None else float(interval) - - def __repr__(self): - if self.interval is None: - return "(delay ε)" - else: - return "(delay {:.3}us)".format(self.interval * 1e6) - - -class Tick(Command): - def __init__(self, domain="sync"): - if not isinstance(domain, (str, ClockDomain)): - raise TypeError("Domain must be a string or a ClockDomain instance, not {!r}" - .format(domain)) - assert domain != "comb" - self.domain = domain - - def __repr__(self): - return "(tick {})".format(self.domain) - - -class Passive(Command): - def __repr__(self): - return "(passive)" - - -class Active(Command): - def __repr__(self): - return "(active)" - - -class Simulator: - def __init__(self, fragment, *, engine="pysim"): - if isinstance(engine, type) and issubclass(engine, BaseEngine): - pass - elif engine == "pysim": - from .pysim import PySimEngine - engine = PySimEngine - else: - raise TypeError("Value '{!r}' is not a simulation engine class or " - "a simulation engine name" - .format(engine)) - - self._fragment = Fragment.get(fragment, platform=None).prepare() - self._engine = engine(self._fragment) - self._clocked = set() - - def _check_process(self, process): - if not (inspect.isgeneratorfunction(process) or inspect.iscoroutinefunction(process)): - raise TypeError("Cannot add a process {!r} because it is not a generator function" - .format(process)) - return process - - def 
add_process(self, process): - process = self._check_process(process) - def wrapper(): - # Only start a bench process after comb settling, so that the reset values are correct. - yield Settle() - yield from process() - self._engine.add_coroutine_process(wrapper, default_cmd=None) - - def add_sync_process(self, process, *, domain="sync"): - process = self._check_process(process) - def wrapper(): - # Only start a sync process after the first clock edge (or reset edge, if the domain - # uses an asynchronous reset). This matches the behavior of synchronous FFs. - yield Tick(domain) - yield from process() - self._engine.add_coroutine_process(wrapper, default_cmd=Tick(domain)) - - def add_clock(self, period, *, phase=None, domain="sync", if_exists=False): - """Add a clock process. - - Adds a process that drives the clock signal of ``domain`` at a 50% duty cycle. - - Arguments - --------- - period : float - Clock period. The process will toggle the ``domain`` clock signal every ``period / 2`` - seconds. - phase : None or float - Clock phase. The process will wait ``phase`` seconds before the first clock transition. - If not specified, defaults to ``period / 2``. - domain : str or ClockDomain - Driven clock domain. If specified as a string, the domain with that name is looked up - in the root fragment of the simulation. - if_exists : bool - If ``False`` (the default), raise an error if the driven domain is specified as - a string and the root fragment does not have such a domain. If ``True``, do nothing - in this case. - """ - if isinstance(domain, ClockDomain): - pass - elif domain in self._fragment.domains: - domain = self._fragment.domains[domain] - elif if_exists: - return - else: - raise ValueError("Domain {!r} is not present in simulation" - .format(domain)) - if domain in self._clocked: - raise ValueError("Domain {!r} already has a clock driving it" - .format(domain.name)) - - if phase is None: - # By default, delay the first edge by half period. 
This causes any synchronous activity - # to happen at a non-zero time, distinguishing it from the reset values in the waveform - # viewer. - phase = period / 2 - self._engine.add_clock_process(domain.clk, phase=phase, period=period) - self._clocked.add(domain) - - def reset(self): - """Reset the simulation. - - Assign the reset value to every signal in the simulation, and restart every user process. - """ - self._engine.reset() - - # TODO(nmigen-0.4): replace with _real_step - @deprecated("instead of `sim.step()`, use `sim.advance()`") - def step(self): - return self.advance() - - def advance(self): - """Advance the simulation. - - Run every process and commit changes until a fixed point is reached, then advance time - to the closest deadline (if any). If there is an unstable combinatorial loop, - this function will never return. - - Returns ``True`` if there are any active processes, ``False`` otherwise. - """ - return self._engine.advance() - - def run(self): - """Run the simulation while any processes are active. - - Processes added with :meth:`add_process` and :meth:`add_sync_process` are initially active, - and may change their status using the ``yield Passive()`` and ``yield Active()`` commands. - Processes compiled from HDL and added with :meth:`add_clock` are always passive. - """ - while self.advance(): - pass - - def run_until(self, deadline, *, run_passive=False): - """Run the simulation until it advances to ``deadline``. - - If ``run_passive`` is ``False``, the simulation also stops when there are no active - processes, similar to :meth:`run`. Otherwise, the simulation will stop only after it - advances to or past ``deadline``. - - If the simulation stops advancing, this function will never return. 
- """ - assert self._engine.now <= deadline - while (self.advance() or run_passive) and self._engine.now < deadline: - pass - - def write_vcd(self, vcd_file, gtkw_file=None, *, traces=()): - """Write waveforms to a Value Change Dump file, optionally populating a GTKWave save file. - - This method returns a context manager. It can be used as: :: - - sim = Simulator(frag) - sim.add_clock(1e-6) - with sim.write_vcd("dump.vcd", "dump.gtkw"): - sim.run_until(1e-3) - - Arguments - --------- - vcd_file : str or file-like object - Verilog Value Change Dump file or filename. - gtkw_file : str or file-like object - GTKWave save file or filename. - traces : iterable of Signal - Signals to display traces for. - """ - if self._engine.now != 0.0: - for file in (vcd_file, gtkw_file): - if hasattr(file, "close"): - file.close() - raise ValueError("Cannot start writing waveforms after advancing simulation time") - - return self._engine.write_vcd(vcd_file=vcd_file, gtkw_file=gtkw_file, traces=traces) +import warnings +warnings.warn("instead of nmigen.sim.core, use amaranth.sim.core", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/sim/pysim.py b/nmigen/sim/pysim.py index ec98fb0..f13d634 100644 --- a/nmigen/sim/pysim.py +++ b/nmigen/sim/pysim.py @@ -1,336 +1,7 @@ -from contextlib import contextmanager -import itertools -from vcd import VCDWriter -from vcd.gtkw import GTKWSave +from amaranth.sim.pysim import * +from amaranth.sim.pysim import __all__ -from ..hdl import * -from ..hdl.ast import SignalDict -from ._base import * -from ._pyrtl import _FragmentCompiler -from ._pycoro import PyCoroProcess -from ._pyclock import PyClockProcess - -__all__ = ["PySimEngine"] - - -class _NameExtractor: - def __init__(self): - self.names = SignalDict() - - def __call__(self, fragment, *, hierarchy=("top",)): - def add_signal_name(signal): - hierarchical_signal_name = (*hierarchy, signal.name) - if signal not in self.names: - self.names[signal] = {hierarchical_signal_name} - else: - 
self.names[signal].add(hierarchical_signal_name) - - for domain_name, domain_signals in fragment.drivers.items(): - if domain_name is not None: - domain = fragment.domains[domain_name] - add_signal_name(domain.clk) - if domain.rst is not None: - add_signal_name(domain.rst) - - for statement in fragment.statements: - for signal in statement._lhs_signals() | statement._rhs_signals(): - if not isinstance(signal, (ClockSignal, ResetSignal)): - add_signal_name(signal) - - for subfragment_index, (subfragment, subfragment_name) in enumerate(fragment.subfragments): - if subfragment_name is None: - subfragment_name = "U${}".format(subfragment_index) - self(subfragment, hierarchy=(*hierarchy, subfragment_name)) - - return self.names - - -class _VCDWriter: - @staticmethod - def timestamp_to_vcd(timestamp): - return timestamp * (10 ** 10) # 1/(100 ps) - - @staticmethod - def decode_to_vcd(signal, value): - return signal.decoder(value).expandtabs().replace(" ", "_") - - def __init__(self, fragment, *, vcd_file, gtkw_file=None, traces=()): - if isinstance(vcd_file, str): - vcd_file = open(vcd_file, "wt") - if isinstance(gtkw_file, str): - gtkw_file = open(gtkw_file, "wt") - - self.vcd_vars = SignalDict() - self.vcd_file = vcd_file - self.vcd_writer = vcd_file and VCDWriter(self.vcd_file, - timescale="100 ps", comment="Generated by nMigen") - - self.gtkw_names = SignalDict() - self.gtkw_file = gtkw_file - self.gtkw_save = gtkw_file and GTKWSave(self.gtkw_file) - - self.traces = [] - - signal_names = _NameExtractor()(fragment) - - trace_names = SignalDict() - for trace in traces: - if trace not in signal_names: - trace_names[trace] = {("top", trace.name)} - self.traces.append(trace) - - if self.vcd_writer is None: - return - - for signal, names in itertools.chain(signal_names.items(), trace_names.items()): - if signal.decoder: - var_type = "string" - var_size = 1 - var_init = self.decode_to_vcd(signal, signal.reset) - else: - var_type = "wire" - var_size = signal.width - var_init 
= signal.reset - - for (*var_scope, var_name) in names: - suffix = None - while True: - try: - if suffix is None: - var_name_suffix = var_name - else: - var_name_suffix = "{}${}".format(var_name, suffix) - if signal not in self.vcd_vars: - vcd_var = self.vcd_writer.register_var( - scope=var_scope, name=var_name_suffix, - var_type=var_type, size=var_size, init=var_init) - self.vcd_vars[signal] = vcd_var - else: - self.vcd_writer.register_alias( - scope=var_scope, name=var_name_suffix, - var=self.vcd_vars[signal]) - break - except KeyError: - suffix = (suffix or 0) + 1 - - if signal not in self.gtkw_names: - self.gtkw_names[signal] = (*var_scope, var_name_suffix) - - def update(self, timestamp, signal, value): - vcd_var = self.vcd_vars.get(signal) - if vcd_var is None: - return - - vcd_timestamp = self.timestamp_to_vcd(timestamp) - if signal.decoder: - var_value = self.decode_to_vcd(signal, value) - else: - var_value = value - self.vcd_writer.change(vcd_var, vcd_timestamp, var_value) - - def close(self, timestamp): - if self.vcd_writer is not None: - self.vcd_writer.close(self.timestamp_to_vcd(timestamp)) - - if self.gtkw_save is not None: - self.gtkw_save.dumpfile(self.vcd_file.name) - self.gtkw_save.dumpfile_size(self.vcd_file.tell()) - - self.gtkw_save.treeopen("top") - for signal in self.traces: - if len(signal) > 1 and not signal.decoder: - suffix = "[{}:0]".format(len(signal) - 1) - else: - suffix = "" - self.gtkw_save.trace(".".join(self.gtkw_names[signal]) + suffix) - - if self.vcd_file is not None: - self.vcd_file.close() - if self.gtkw_file is not None: - self.gtkw_file.close() - - -class _Timeline: - def __init__(self): - self.now = 0.0 - self.deadlines = dict() - - def reset(self): - self.now = 0.0 - self.deadlines.clear() - - def at(self, run_at, process): - assert process not in self.deadlines - self.deadlines[process] = run_at - - def delay(self, delay_by, process): - if delay_by is None: - run_at = self.now - else: - run_at = self.now + delay_by - 
self.at(run_at, process) - - def advance(self): - nearest_processes = set() - nearest_deadline = None - for process, deadline in self.deadlines.items(): - if deadline is None: - if nearest_deadline is not None: - nearest_processes.clear() - nearest_processes.add(process) - nearest_deadline = self.now - break - elif nearest_deadline is None or deadline <= nearest_deadline: - assert deadline >= self.now - if nearest_deadline is not None and deadline < nearest_deadline: - nearest_processes.clear() - nearest_processes.add(process) - nearest_deadline = deadline - - if not nearest_processes: - return False - - for process in nearest_processes: - process.runnable = True - del self.deadlines[process] - self.now = nearest_deadline - - return True - - -class _PySignalState(BaseSignalState): - __slots__ = ("signal", "curr", "next", "waiters", "pending") - - def __init__(self, signal, pending): - self.signal = signal - self.pending = pending - self.waiters = dict() - self.curr = self.next = signal.reset - - def set(self, value): - if self.next == value: - return - self.next = value - self.pending.add(self) - - def commit(self): - if self.curr == self.next: - return False - self.curr = self.next - - awoken_any = False - for process, trigger in self.waiters.items(): - if trigger is None or trigger == self.curr: - process.runnable = awoken_any = True - return awoken_any - - -class _PySimulation(BaseSimulation): - def __init__(self): - self.timeline = _Timeline() - self.signals = SignalDict() - self.slots = [] - self.pending = set() - - def reset(self): - self.timeline.reset() - for signal, index in self.signals.items(): - self.slots[index].curr = self.slots[index].next = signal.reset - self.pending.clear() - - def get_signal(self, signal): - try: - return self.signals[signal] - except KeyError: - index = len(self.slots) - self.slots.append(_PySignalState(signal, self.pending)) - self.signals[signal] = index - return index - - def add_trigger(self, process, signal, *, 
trigger=None): - index = self.get_signal(signal) - assert (process not in self.slots[index].waiters or - self.slots[index].waiters[process] == trigger) - self.slots[index].waiters[process] = trigger - - def remove_trigger(self, process, signal): - index = self.get_signal(signal) - assert process in self.slots[index].waiters - del self.slots[index].waiters[process] - - def wait_interval(self, process, interval): - self.timeline.delay(interval, process) - - def commit(self, changed=None): - converged = True - for signal_state in self.pending: - if signal_state.commit(): - converged = False - if changed is not None: - changed.update(self.pending) - self.pending.clear() - return converged - - -class PySimEngine(BaseEngine): - def __init__(self, fragment): - self._state = _PySimulation() - self._timeline = self._state.timeline - - self._fragment = fragment - self._processes = _FragmentCompiler(self._state)(self._fragment) - self._vcd_writers = [] - - def add_coroutine_process(self, process, *, default_cmd): - self._processes.add(PyCoroProcess(self._state, self._fragment.domains, process, - default_cmd=default_cmd)) - - def add_clock_process(self, clock, *, phase, period): - self._processes.add(PyClockProcess(self._state, clock, - phase=phase, period=period)) - - def reset(self): - self._state.reset() - for process in self._processes: - process.reset() - - def _step(self): - changed = set() if self._vcd_writers else None - - # Performs the two phases of a delta cycle in a loop: - converged = False - while not converged: - # 1. eval: run and suspend every non-waiting process once, queueing signal changes - for process in self._processes: - if process.runnable: - process.runnable = False - process.run() - - # 2. 
commit: apply every queued signal change, waking up any waiting processes - converged = self._state.commit(changed) - - for vcd_writer in self._vcd_writers: - for signal_state in changed: - vcd_writer.update(self._timeline.now, - signal_state.signal, signal_state.curr) - - def advance(self): - self._step() - self._timeline.advance() - return any(not process.passive for process in self._processes) - - @property - def now(self): - return self._timeline.now - - @contextmanager - def write_vcd(self, *, vcd_file, gtkw_file, traces): - vcd_writer = _VCDWriter(self._fragment, - vcd_file=vcd_file, gtkw_file=gtkw_file, traces=traces) - try: - self._vcd_writers.append(vcd_writer) - yield - finally: - vcd_writer.close(self._timeline.now) - self._vcd_writers.remove(vcd_writer) +import warnings +warnings.warn("instead of nmigen.sim.pysim, use amaranth.sim.pysim", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/test/__init__.py b/nmigen/test/__init__.py index 2ec5765..4417ea1 100644 --- a/nmigen/test/__init__.py +++ b/nmigen/test/__init__.py @@ -1 +1,6 @@ -# TODO(nmigen-0.4): remove the entire package +from amaranth.test import * + + +import warnings +warnings.warn("instead of nmigen.test, use amaranth.test", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/test/utils.py b/nmigen/test/utils.py index 681243c..59aeaaa 100644 --- a/nmigen/test/utils.py +++ b/nmigen/test/utils.py @@ -1,84 +1,7 @@ -import os -import re -import shutil -import subprocess -import textwrap -import traceback -import unittest +from amaranth.test.utils import * +from amaranth.test.utils import __all__ + + import warnings - -from ..hdl.ast import * -from ..hdl.ir import * -from ..back import rtlil -from .._toolchain import require_tool - - -warnings.warn("nmigen.test.utils is an internal utility module that has several design flaws " - "and was never intended as a public API; it will be removed in nmigen 0.4. " - "if you are using FHDLTestCase, include its implementation in your codebase. 
" - "see also nmigen/nmigen#487", +warnings.warn("instead of nmigen.test.utils, use amaranth.test.utils", DeprecationWarning, stacklevel=2) - - -__all__ = ["FHDLTestCase"] - - -class FHDLTestCase(unittest.TestCase): - def assertRepr(self, obj, repr_str): - if isinstance(obj, list): - obj = Statement.cast(obj) - def prepare_repr(repr_str): - repr_str = re.sub(r"\s+", " ", repr_str) - repr_str = re.sub(r"\( (?=\()", "(", repr_str) - repr_str = re.sub(r"\) (?=\))", ")", repr_str) - return repr_str.strip() - self.assertEqual(prepare_repr(repr(obj)), prepare_repr(repr_str)) - - def assertFormal(self, spec, mode="bmc", depth=1): - caller, *_ = traceback.extract_stack(limit=2) - spec_root, _ = os.path.splitext(caller.filename) - spec_dir = os.path.dirname(spec_root) - spec_name = "{}_{}".format( - os.path.basename(spec_root).replace("test_", "spec_"), - caller.name.replace("test_", "") - ) - - # The sby -f switch seems not fully functional when sby is reading from stdin. - if os.path.exists(os.path.join(spec_dir, spec_name)): - shutil.rmtree(os.path.join(spec_dir, spec_name)) - - if mode == "hybrid": - # A mix of BMC and k-induction, as per personal communication with Claire Wolf. 
- script = "setattr -unset init w:* a:nmigen.sample_reg %d" - mode = "bmc" - else: - script = "" - - config = textwrap.dedent("""\ - [options] - mode {mode} - depth {depth} - wait on - - [engines] - smtbmc - - [script] - read_ilang top.il - prep - {script} - - [file top.il] - {rtlil} - """).format( - mode=mode, - depth=depth, - script=script, - rtlil=rtlil.convert(Fragment.get(spec, platform="formal")) - ) - with subprocess.Popen([require_tool("sby"), "-f", "-d", spec_name], cwd=spec_dir, - universal_newlines=True, - stdin=subprocess.PIPE, stdout=subprocess.PIPE) as proc: - stdout, stderr = proc.communicate(config) - if proc.returncode != 0: - self.fail("Formal verification failed:\n" + stdout) diff --git a/nmigen/tracer.py b/nmigen/tracer.py index 17795f3..d0ae2af 100644 --- a/nmigen/tracer.py +++ b/nmigen/tracer.py @@ -1,55 +1,7 @@ -import sys -from opcode import opname +from amaranth.tracer import * +from amaranth.tracer import __all__ -__all__ = ["NameNotFound", "get_var_name", "get_src_loc"] - - -class NameNotFound(Exception): - pass - - -_raise_exception = object() - - -def get_var_name(depth=2, default=_raise_exception): - frame = sys._getframe(depth) - code = frame.f_code - call_index = frame.f_lasti - while True: - call_opc = opname[code.co_code[call_index]] - if call_opc in ("EXTENDED_ARG",): - call_index += 2 - else: - break - if call_opc not in ("CALL_FUNCTION", "CALL_FUNCTION_KW", "CALL_FUNCTION_EX", "CALL_METHOD"): - return None - - index = call_index + 2 - while True: - opc = opname[code.co_code[index]] - if opc in ("STORE_NAME", "STORE_ATTR"): - name_index = int(code.co_code[index + 1]) - return code.co_names[name_index] - elif opc == "STORE_FAST": - name_index = int(code.co_code[index + 1]) - return code.co_varnames[name_index] - elif opc == "STORE_DEREF": - name_index = int(code.co_code[index + 1]) - return code.co_cellvars[name_index] - elif opc in ("LOAD_GLOBAL", "LOAD_NAME", "LOAD_ATTR", "LOAD_FAST", "LOAD_DEREF", - "DUP_TOP", "BUILD_LIST"): - 
index += 2 - else: - if default is _raise_exception: - raise NameNotFound - else: - return default - - -def get_src_loc(src_loc_at=0): - # n-th frame: get_src_loc() - # n-1th frame: caller of get_src_loc() (usually constructor) - # n-2th frame: caller of caller (usually user code) - frame = sys._getframe(2 + src_loc_at) - return (frame.f_code.co_filename, frame.f_lineno) +import warnings +warnings.warn("instead of nmigen.tracer, use amaranth.tracer", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/utils.py b/nmigen/utils.py index 227258a..af50d65 100644 --- a/nmigen/utils.py +++ b/nmigen/utils.py @@ -1,21 +1,7 @@ -__all__ = ["log2_int", "bits_for"] +from amaranth.utils import * +from amaranth.utils import __all__ -def log2_int(n, need_pow2=True): - if n == 0: - return 0 - r = (n - 1).bit_length() - if need_pow2 and (1 << r) != n: - raise ValueError("{} is not a power of 2".format(n)) - return r - - -def bits_for(n, require_sign_bit=False): - if n > 0: - r = log2_int(n + 1, False) - else: - require_sign_bit = True - r = log2_int(-n, False) - if require_sign_bit: - r += 1 - return r +import warnings +warnings.warn("instead of nmigen.utils, use amaranth.utils", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/vendor/__init__.py b/nmigen/vendor/__init__.py index e69de29..d0a733c 100644 --- a/nmigen/vendor/__init__.py +++ b/nmigen/vendor/__init__.py @@ -0,0 +1,6 @@ +from amaranth.vendor import * + + +import warnings +warnings.warn("instead of nmigen.vendor, use amaranth.vendor", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/vendor/intel.py b/nmigen/vendor/intel.py index b786393..c1dc33c 100644 --- a/nmigen/vendor/intel.py +++ b/nmigen/vendor/intel.py @@ -1,571 +1,7 @@ -from abc import abstractproperty - -from ..hdl import * -from ..build import * +from amaranth.vendor.intel import * +from amaranth.vendor.intel import __all__ -__all__ = ["IntelPlatform"] - - -class IntelPlatform(TemplatedPlatform): - """ - Quartus toolchain - ----------------- - 
- Required tools: - * ``quartus_map`` - * ``quartus_fit`` - * ``quartus_asm`` - * ``quartus_sta`` - - The environment is populated by running the script specified in the environment variable - ``NMIGEN_ENV_Quartus``, if present. - - Available overrides: - * ``add_settings``: inserts commands at the end of the QSF file. - * ``add_constraints``: inserts commands at the end of the SDC file. - * ``nproc``: sets the number of cores used by all tools. - * ``quartus_map_opts``: adds extra options for ``quartus_map``. - * ``quartus_fit_opts``: adds extra options for ``quartus_fit``. - * ``quartus_asm_opts``: adds extra options for ``quartus_asm``. - * ``quartus_sta_opts``: adds extra options for ``quartus_sta``. - - Build products: - * ``*.rpt``: toolchain reports. - * ``{{name}}.sof``: bitstream as SRAM object file. - * ``{{name}}.rbf``: bitstream as raw binary file. - - - Mistral toolchain - ----------------- - - Required tools: - * ``yosys`` - * ``nextpnr-mistral`` - - The environment is populated by running the script specified in the environment variable - ``NMIGEN_ENV_Mistral``, if present. - - * ``verbose``: enables logging of informational messages to standard error. - * ``read_verilog_opts``: adds options for ``read_verilog`` Yosys command. - * ``synth_opts``: adds options for ``synth_intel_alm`` Yosys command. - * ``script_after_read``: inserts commands after ``read_ilang`` in Yosys script. - * ``script_after_synth``: inserts commands after ``synth_intel_alm`` in Yosys script. - * ``yosys_opts``: adds extra options for ``yosys``. - * ``nextpnr_opts``: adds extra options for ``nextpnr-mistral``. 
- """ - - toolchain = None # selected when creating platform - - device = abstractproperty() - package = abstractproperty() - speed = abstractproperty() - suffix = "" - - # Quartus templates - - quartus_suppressed_warnings = [ - 10264, # All case item expressions in this case statement are onehot - 10270, # Incomplete Verilog case statement has no default case item - 10335, # Unrecognized synthesis attribute - 10763, # Verilog case statement has overlapping case item expressions with non-constant or don't care bits - 10935, # Verilog casex/casez overlaps with a previous casex/vasez item expression - 12125, # Using design file which is not specified as a design file for the current project, but contains definitions used in project - 18236, # Number of processors not specified in QSF - 292013, # Feature is only available with a valid subscription license - ] - - quartus_required_tools = [ - "quartus_map", - "quartus_fit", - "quartus_asm", - "quartus_sta", - ] - - quartus_file_templates = { - **TemplatedPlatform.build_script_templates, - "build_{{name}}.sh": r""" - # {{autogenerated}} - if [ -n "${{platform._toolchain_env_var}}" ]; then - QUARTUS_ROOTDIR=$(dirname $(dirname "${{platform._toolchain_env_var}}")) - # Quartus' qenv.sh does not work with `set -e`. - . 
"${{platform._toolchain_env_var}}" - fi - set -e{{verbose("x")}} - {{emit_commands("sh")}} - """, - "{{name}}.v": r""" - /* {{autogenerated}} */ - {{emit_verilog()}} - """, - "{{name}}.debug.v": r""" - /* {{autogenerated}} */ - {{emit_debug_verilog()}} - """, - "{{name}}.qsf": r""" - # {{autogenerated}} - {% if get_override("nproc") -%} - set_global_assignment -name NUM_PARALLEL_PROCESSORS {{get_override("nproc")}} - {% endif %} - - {% for file in platform.iter_files(".v") -%} - set_global_assignment -name VERILOG_FILE {{file|tcl_quote}} - {% endfor %} - {% for file in platform.iter_files(".sv") -%} - set_global_assignment -name SYSTEMVERILOG_FILE {{file|tcl_quote}} - {% endfor %} - {% for file in platform.iter_files(".vhd", ".vhdl") -%} - set_global_assignment -name VHDL_FILE {{file|tcl_quote}} - {% endfor %} - set_global_assignment -name VERILOG_FILE {{name}}.v - set_global_assignment -name TOP_LEVEL_ENTITY {{name}} - - set_global_assignment -name DEVICE {{platform.device}}{{platform.package}}{{platform.speed}}{{platform.suffix}} - {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} - set_location_assignment -to {{port_name|tcl_quote}} PIN_{{pin_name}} - {% for key, value in attrs.items() -%} - set_instance_assignment -to {{port_name|tcl_quote}} -name {{key}} {{value|tcl_quote}} - {% endfor %} - {% endfor %} - - set_global_assignment -name GENERATE_RBF_FILE ON - - {{get_override("add_settings")|default("# (add_settings placeholder)")}} - """, - "{{name}}.sdc": r""" - {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} - {% if port_signal is not none -%} - create_clock -name {{port_signal.name|tcl_quote}} -period {{1000000000/frequency}} [get_ports {{port_signal.name|tcl_quote}}] - {% else -%} - create_clock -name {{net_signal.name|tcl_quote}} -period {{1000000000/frequency}} [get_nets {{net_signal|hierarchy("|")|tcl_quote}}] - {% endif %} - {% endfor %} - {{get_override("add_constraints")|default("# 
(add_constraints placeholder)")}} - """, - "{{name}}.srf": r""" - {% for warning in platform.quartus_suppressed_warnings %} - { "" "" "" "{{name}}.v" { } { } 0 {{warning}} "" 0 0 "Design Software" 0 -1 0 ""} - {% endfor %} - """, - } - quartus_command_templates = [ - r""" - {{invoke_tool("quartus_map")}} - {{get_override("quartus_map_opts")|options}} - --rev={{name}} {{name}} - """, - r""" - {{invoke_tool("quartus_fit")}} - {{get_override("quartus_fit_opts")|options}} - --rev={{name}} {{name}} - """, - r""" - {{invoke_tool("quartus_asm")}} - {{get_override("quartus_asm_opts")|options}} - --rev={{name}} {{name}} - """, - r""" - {{invoke_tool("quartus_sta")}} - {{get_override("quartus_sta_opts")|options}} - --rev={{name}} {{name}} - """, - ] - - - # Mistral templates - - mistral_required_tools = [ - "yosys", - "nextpnr-mistral" - ] - mistral_file_templates = { - **TemplatedPlatform.build_script_templates, - "{{name}}.il": r""" - # {{autogenerated}} - {{emit_rtlil()}} - """, - "{{name}}.debug.v": r""" - /* {{autogenerated}} */ - {{emit_debug_verilog()}} - """, - "{{name}}.ys": r""" - # {{autogenerated}} - {% for file in platform.iter_files(".v") -%} - read_verilog {{get_override("read_verilog_opts")|options}} {{file}} - {% endfor %} - {% for file in platform.iter_files(".sv") -%} - read_verilog -sv {{get_override("read_verilog_opts")|options}} {{file}} - {% endfor %} - {% for file in platform.iter_files(".il") -%} - read_ilang {{file}} - {% endfor %} - read_ilang {{name}}.il - delete w:$verilog_initial_trigger - {{get_override("script_after_read")|default("# (script_after_read placeholder)")}} - synth_intel_alm {{get_override("synth_opts")|options}} -top {{name}} - {{get_override("script_after_synth")|default("# (script_after_synth placeholder)")}} - write_json {{name}}.json - """, - "{{name}}.qsf": r""" - # {{autogenerated}} - {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} - set_location_assignment -to {{port_name|tcl_quote}} 
PIN_{{pin_name}} - {% for key, value in attrs.items() -%} - set_instance_assignment -to {{port_name|tcl_quote}} -name {{key}} {{value|tcl_quote}} - {% endfor %} - {% endfor %} - """, - - } - mistral_command_templates = [ - r""" - {{invoke_tool("yosys")}} - {{quiet("-q")}} - {{get_override("yosys_opts")|options}} - -l {{name}}.rpt - {{name}}.ys - """, - r""" - {{invoke_tool("nextpnr-mistral")}} - {{quiet("--quiet")}} - {{get_override("nextpnr_opts")|options}} - --log {{name}}.tim - --device {{platform.device}}{{platform.package}}{{platform.speed}}{{platform.suffix}} - --json {{name}}.json - --qsf {{name}}.qsf - --rbf {{name}}.rbf - """ - ] - - # Common logic - - def __init__(self, *, toolchain="Quartus"): - super().__init__() - - assert toolchain in ("Quartus", "Mistral") - self.toolchain = toolchain - - @property - def required_tools(self): - if self.toolchain == "Quartus": - return self.quartus_required_tools - if self.toolchain == "Mistral": - return self.mistral_required_tools - assert False - - @property - def file_templates(self): - if self.toolchain == "Quartus": - return self.quartus_file_templates - if self.toolchain == "Mistral": - return self.mistral_file_templates - assert False - - @property - def command_templates(self): - if self.toolchain == "Quartus": - return self.quartus_command_templates - if self.toolchain == "Mistral": - return self.mistral_command_templates - assert False - - def add_clock_constraint(self, clock, frequency): - super().add_clock_constraint(clock, frequency) - clock.attrs["keep"] = "true" - - @property - def default_clk_constraint(self): - # Internal high-speed oscillator on Cyclone V devices. - # It is specified to not be faster than 100MHz, but the actual - # frequency seems to vary a lot between devices. Measurements - # of 78 to 84 MHz have been observed. - if self.default_clk == "cyclonev_oscillator": - assert self.device.startswith("5C") - return Clock(100e6) - # Otherwise, use the defined Clock resource. 
- return super().default_clk_constraint - - def create_missing_domain(self, name): - if name == "sync" and self.default_clk == "cyclonev_oscillator": - # Use the internal high-speed oscillator for Cyclone V devices - assert self.device.startswith("5C") - m = Module() - m.domains += ClockDomain("sync") - m.submodules += Instance("cyclonev_oscillator", - i_oscena=Const(1), - o_clkout=ClockSignal("sync")) - return m - else: - return super().create_missing_domain(name) - - # The altiobuf_* and altddio_* primitives are explained in the following Intel documents: - # * https://www.intel.com/content/dam/www/programmable/us/en/pdfs/literature/ug/ug_altiobuf.pdf - # * https://www.intel.com/content/dam/www/programmable/us/en/pdfs/literature/ug/ug_altddio.pdf - # See also errata mentioned in: https://www.intel.com/content/www/us/en/programmable/support/support-resources/knowledge-base/solutions/rd11192012_735.html. - - @staticmethod - def _get_ireg(m, pin, invert): - def get_ineg(i): - if invert: - i_neg = Signal.like(i, name_suffix="_neg") - m.d.comb += i.eq(~i_neg) - return i_neg - else: - return i - - if pin.xdr == 0: - return get_ineg(pin.i) - elif pin.xdr == 1: - i_sdr = Signal(pin.width, name="{}_i_sdr") - m.submodules += Instance("$dff", - p_CLK_POLARITY=1, - p_WIDTH=pin.width, - i_CLK=pin.i_clk, - i_D=i_sdr, - o_Q=get_ineg(pin.i), - ) - return i_sdr - elif pin.xdr == 2: - i_ddr = Signal(pin.width, name="{}_i_ddr".format(pin.name)) - m.submodules["{}_i_ddr".format(pin.name)] = Instance("altddio_in", - p_width=pin.width, - i_datain=i_ddr, - i_inclock=pin.i_clk, - o_dataout_h=get_ineg(pin.i0), - o_dataout_l=get_ineg(pin.i1), - ) - return i_ddr - assert False - - @staticmethod - def _get_oreg(m, pin, invert): - def get_oneg(o): - if invert: - o_neg = Signal.like(o, name_suffix="_neg") - m.d.comb += o_neg.eq(~o) - return o_neg - else: - return o - - if pin.xdr == 0: - return get_oneg(pin.o) - elif pin.xdr == 1: - o_sdr = Signal(pin.width, name="{}_o_sdr".format(pin.name)) 
- m.submodules += Instance("$dff", - p_CLK_POLARITY=1, - p_WIDTH=pin.width, - i_CLK=pin.o_clk, - i_D=get_oneg(pin.o), - o_Q=o_sdr, - ) - return o_sdr - elif pin.xdr == 2: - o_ddr = Signal(pin.width, name="{}_o_ddr".format(pin.name)) - m.submodules["{}_o_ddr".format(pin.name)] = Instance("altddio_out", - p_width=pin.width, - o_dataout=o_ddr, - i_outclock=pin.o_clk, - i_datain_h=get_oneg(pin.o0), - i_datain_l=get_oneg(pin.o1), - ) - return o_ddr - assert False - - @staticmethod - def _get_oereg(m, pin): - # altiobuf_ requires an output enable signal for each pin, but pin.oe is 1 bit wide. - if pin.xdr == 0: - return Repl(pin.oe, pin.width) - elif pin.xdr in (1, 2): - oe_reg = Signal(pin.width, name="{}_oe_reg".format(pin.name)) - oe_reg.attrs["useioff"] = "1" - m.submodules += Instance("$dff", - p_CLK_POLARITY=1, - p_WIDTH=pin.width, - i_CLK=pin.o_clk, - i_D=pin.oe, - o_Q=oe_reg, - ) - return oe_reg - assert False - - def get_input(self, pin, port, attrs, invert): - self._check_feature("single-ended input", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - if pin.xdr == 1: - port.attrs["useioff"] = 1 - - m = Module() - m.submodules[pin.name] = Instance("altiobuf_in", - p_enable_bus_hold="FALSE", - p_number_of_channels=pin.width, - p_use_differential_mode="FALSE", - i_datain=port.io, - o_dataout=self._get_ireg(m, pin, invert) - ) - return m - - def get_output(self, pin, port, attrs, invert): - self._check_feature("single-ended output", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - if pin.xdr == 1: - port.attrs["useioff"] = 1 - - m = Module() - m.submodules[pin.name] = Instance("altiobuf_out", - p_enable_bus_hold="FALSE", - p_number_of_channels=pin.width, - p_use_differential_mode="FALSE", - p_use_oe="FALSE", - i_datain=self._get_oreg(m, pin, invert), - o_dataout=port.io, - ) - return m - - def get_tristate(self, pin, port, attrs, invert): - self._check_feature("single-ended tristate", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - if pin.xdr 
== 1: - port.attrs["useioff"] = 1 - - m = Module() - m.submodules[pin.name] = Instance("altiobuf_out", - p_enable_bus_hold="FALSE", - p_number_of_channels=pin.width, - p_use_differential_mode="FALSE", - p_use_oe="TRUE", - i_datain=self._get_oreg(m, pin, invert), - o_dataout=port.io, - i_oe=self._get_oereg(m, pin) - ) - return m - - def get_input_output(self, pin, port, attrs, invert): - self._check_feature("single-ended input/output", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - if pin.xdr == 1: - port.attrs["useioff"] = 1 - - m = Module() - m.submodules[pin.name] = Instance("altiobuf_bidir", - p_enable_bus_hold="FALSE", - p_number_of_channels=pin.width, - p_use_differential_mode="FALSE", - i_datain=self._get_oreg(m, pin, invert), - io_dataio=port.io, - o_dataout=self._get_ireg(m, pin, invert), - i_oe=self._get_oereg(m, pin), - ) - return m - - def get_diff_input(self, pin, port, attrs, invert): - self._check_feature("differential input", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - if pin.xdr == 1: - port.p.attrs["useioff"] = 1 - port.n.attrs["useioff"] = 1 - - m = Module() - m.submodules[pin.name] = Instance("altiobuf_in", - p_enable_bus_hold="FALSE", - p_number_of_channels=pin.width, - p_use_differential_mode="TRUE", - i_datain=port.p, - i_datain_b=port.n, - o_dataout=self._get_ireg(m, pin, invert) - ) - return m - - def get_diff_output(self, pin, port, attrs, invert): - self._check_feature("differential output", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - if pin.xdr == 1: - port.p.attrs["useioff"] = 1 - port.n.attrs["useioff"] = 1 - - m = Module() - m.submodules[pin.name] = Instance("altiobuf_out", - p_enable_bus_hold="FALSE", - p_number_of_channels=pin.width, - p_use_differential_mode="TRUE", - p_use_oe="FALSE", - i_datain=self._get_oreg(m, pin, invert), - o_dataout=port.p, - o_dataout_b=port.n, - ) - return m - - def get_diff_tristate(self, pin, port, attrs, invert): - self._check_feature("differential tristate", pin, attrs, 
- valid_xdrs=(0, 1, 2), valid_attrs=True) - if pin.xdr == 1: - port.p.attrs["useioff"] = 1 - port.n.attrs["useioff"] = 1 - - m = Module() - m.submodules[pin.name] = Instance("altiobuf_out", - p_enable_bus_hold="FALSE", - p_number_of_channels=pin.width, - p_use_differential_mode="TRUE", - p_use_oe="TRUE", - i_datain=self._get_oreg(m, pin, invert), - o_dataout=port.p, - o_dataout_b=port.n, - i_oe=self._get_oereg(m, pin), - ) - return m - - def get_diff_input_output(self, pin, port, attrs, invert): - self._check_feature("differential input/output", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - if pin.xdr == 1: - port.p.attrs["useioff"] = 1 - port.n.attrs["useioff"] = 1 - - m = Module() - m.submodules[pin.name] = Instance("altiobuf_bidir", - p_enable_bus_hold="FALSE", - p_number_of_channels=pin.width, - p_use_differential_mode="TRUE", - i_datain=self._get_oreg(m, pin, invert), - io_dataio=port.p, - io_dataio_b=port.n, - o_dataout=self._get_ireg(m, pin, invert), - i_oe=self._get_oereg(m, pin), - ) - return m - - # The altera_std_synchronizer{,_bundle} megafunctions embed SDC constraints that mark false - # paths, so use them instead of our default implementation. 
- - def get_ff_sync(self, ff_sync): - return Instance("altera_std_synchronizer_bundle", - p_width=len(ff_sync.i), - p_depth=ff_sync._stages, - i_clk=ClockSignal(ff_sync._o_domain), - i_reset_n=Const(1), - i_din=ff_sync.i, - o_dout=ff_sync.o, - ) - - def get_async_ff_sync(self, async_ff_sync): - m = Module() - sync_output = Signal() - if async_ff_sync._edge == "pos": - m.submodules += Instance("altera_std_synchronizer", - p_depth=async_ff_sync._stages, - i_clk=ClockSignal(async_ff_sync._o_domain), - i_reset_n=~async_ff_sync.i, - i_din=Const(1), - o_dout=sync_output, - ) - else: - m.submodules += Instance("altera_std_synchronizer", - p_depth=async_ff_sync._stages, - i_clk=ClockSignal(async_ff_sync._o_domain), - i_reset_n=async_ff_sync.i, - i_din=Const(1), - o_dout=sync_output, - ) - m.d.comb += async_ff_sync.o.eq(~sync_output) - return m +import warnings +warnings.warn("instead of nmigen.vendor.intel, use amaranth.vendor.intel", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/vendor/lattice_ecp5.py b/nmigen/vendor/lattice_ecp5.py index eb0027f..f53aa56 100644 --- a/nmigen/vendor/lattice_ecp5.py +++ b/nmigen/vendor/lattice_ecp5.py @@ -1,666 +1,7 @@ -from abc import abstractproperty - -from ..hdl import * -from ..build import * +from amaranth.vendor.lattice_ecp5 import * +from amaranth.vendor.lattice_ecp5 import __all__ -__all__ = ["LatticeECP5Platform"] - - -class LatticeECP5Platform(TemplatedPlatform): - """ - Trellis toolchain - ----------------- - - Required tools: - * ``yosys`` - * ``nextpnr-ecp5`` - * ``ecppack`` - - The environment is populated by running the script specified in the environment variable - ``NMIGEN_ENV_Trellis``, if present. - - Available overrides: - * ``verbose``: enables logging of informational messages to standard error. - * ``read_verilog_opts``: adds options for ``read_verilog`` Yosys command. - * ``synth_opts``: adds options for ``synth_ecp5`` Yosys command. 
- * ``script_after_read``: inserts commands after ``read_ilang`` in Yosys script. - * ``script_after_synth``: inserts commands after ``synth_ecp5`` in Yosys script. - * ``yosys_opts``: adds extra options for ``yosys``. - * ``nextpnr_opts``: adds extra options for ``nextpnr-ecp5``. - * ``ecppack_opts``: adds extra options for ``ecppack``. - * ``add_preferences``: inserts commands at the end of the LPF file. - - Build products: - * ``{{name}}.rpt``: Yosys log. - * ``{{name}}.json``: synthesized RTL. - * ``{{name}}.tim``: nextpnr log. - * ``{{name}}.config``: ASCII bitstream. - * ``{{name}}.bit``: binary bitstream. - * ``{{name}}.svf``: JTAG programming vector. - - Diamond toolchain - ----------------- - - Required tools: - * ``pnmainc`` - * ``ddtcmd`` - - The environment is populated by running the script specified in the environment variable - ``NMIGEN_ENV_Diamond``, if present. On Linux, diamond_env as provided by Diamond - itself is a good candidate. On Windows, the following script (named ``diamond_env.bat``, - for instance) is known to work:: - - @echo off - set PATH=C:\\lscc\\diamond\\%DIAMOND_VERSION%\\bin\\nt64;%PATH% - - Available overrides: - * ``script_project``: inserts commands before ``prj_project save`` in Tcl script. - * ``script_after_export``: inserts commands after ``prj_run Export`` in Tcl script. - * ``add_preferences``: inserts commands at the end of the LPF file. - * ``add_constraints``: inserts commands at the end of the XDC file. - - Build products: - * ``{{name}}_impl/{{name}}_impl.htm``: consolidated log. - * ``{{name}}.bit``: binary bitstream. - * ``{{name}}.svf``: JTAG programming vector. 
- """ - - toolchain = None # selected when creating platform - - device = abstractproperty() - package = abstractproperty() - speed = abstractproperty() - grade = "C" # [C]ommercial, [I]ndustrial - - # Trellis templates - - _nextpnr_device_options = { - "LFE5U-12F": "--12k", - "LFE5U-25F": "--25k", - "LFE5U-45F": "--45k", - "LFE5U-85F": "--85k", - "LFE5UM-25F": "--um-25k", - "LFE5UM-45F": "--um-45k", - "LFE5UM-85F": "--um-85k", - "LFE5UM5G-25F": "--um5g-25k", - "LFE5UM5G-45F": "--um5g-45k", - "LFE5UM5G-85F": "--um5g-85k", - } - _nextpnr_package_options = { - "BG256": "caBGA256", - "MG285": "csfBGA285", - "BG381": "caBGA381", - "BG554": "caBGA554", - "BG756": "caBGA756", - } - - _trellis_required_tools = [ - "yosys", - "nextpnr-ecp5", - "ecppack" - ] - _trellis_file_templates = { - **TemplatedPlatform.build_script_templates, - "{{name}}.il": r""" - # {{autogenerated}} - {{emit_rtlil()}} - """, - "{{name}}.debug.v": r""" - /* {{autogenerated}} */ - {{emit_debug_verilog()}} - """, - "{{name}}.ys": r""" - # {{autogenerated}} - {% for file in platform.iter_files(".v") -%} - read_verilog {{get_override("read_verilog_opts")|options}} {{file}} - {% endfor %} - {% for file in platform.iter_files(".sv") -%} - read_verilog -sv {{get_override("read_verilog_opts")|options}} {{file}} - {% endfor %} - {% for file in platform.iter_files(".il") -%} - read_ilang {{file}} - {% endfor %} - read_ilang {{name}}.il - delete w:$verilog_initial_trigger - {{get_override("script_after_read")|default("# (script_after_read placeholder)")}} - synth_ecp5 {{get_override("synth_opts")|options}} -top {{name}} - {{get_override("script_after_synth")|default("# (script_after_synth placeholder)")}} - write_json {{name}}.json - """, - "{{name}}.lpf": r""" - # {{autogenerated}} - BLOCK ASYNCPATHS; - BLOCK RESETPATHS; - {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} - LOCATE COMP "{{port_name}}" SITE "{{pin_name}}"; - {% if attrs -%} - IOBUF PORT "{{port_name}}" - {%- for 
key, value in attrs.items() %} {{key}}={{value}}{% endfor %}; - {% endif %} - {% endfor %} - {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} - {% if port_signal is not none -%} - FREQUENCY PORT "{{port_signal.name}}" {{frequency}} HZ; - {% else -%} - FREQUENCY NET "{{net_signal|hierarchy(".")}}" {{frequency}} HZ; - {% endif %} - {% endfor %} - {{get_override("add_preferences")|default("# (add_preferences placeholder)")}} - """ - } - _trellis_command_templates = [ - r""" - {{invoke_tool("yosys")}} - {{quiet("-q")}} - {{get_override("yosys_opts")|options}} - -l {{name}}.rpt - {{name}}.ys - """, - r""" - {{invoke_tool("nextpnr-ecp5")}} - {{quiet("--quiet")}} - {{get_override("nextpnr_opts")|options}} - --log {{name}}.tim - {{platform._nextpnr_device_options[platform.device]}} - --package {{platform._nextpnr_package_options[platform.package]|upper}} - --speed {{platform.speed}} - --json {{name}}.json - --lpf {{name}}.lpf - --textcfg {{name}}.config - """, - r""" - {{invoke_tool("ecppack")}} - {{verbose("--verbose")}} - {{get_override("ecppack_opts")|options}} - --input {{name}}.config - --bit {{name}}.bit - --svf {{name}}.svf - """ - ] - - # Diamond templates - - _diamond_required_tools = [ - "pnmainc", - "ddtcmd" - ] - _diamond_file_templates = { - **TemplatedPlatform.build_script_templates, - "build_{{name}}.sh": r""" - # {{autogenerated}} - set -e{{verbose("x")}} - if [ -z "$BASH" ] ; then exec /bin/bash "$0" "$@"; fi - if [ -n "${{platform._toolchain_env_var}}" ]; then - bindir=$(dirname "${{platform._toolchain_env_var}}") - . 
"${{platform._toolchain_env_var}}" - fi - {{emit_commands("sh")}} - """, - "{{name}}.v": r""" - /* {{autogenerated}} */ - {{emit_verilog()}} - """, - "{{name}}.debug.v": r""" - /* {{autogenerated}} */ - {{emit_debug_verilog()}} - """, - "{{name}}.tcl": r""" - prj_project new -name {{name}} -impl impl -impl_dir {{name}}_impl \ - -dev {{platform.device}}-{{platform.speed}}{{platform.package}}{{platform.grade}} \ - -lpf {{name}}.lpf \ - -synthesis synplify - {% for file in platform.iter_files(".v", ".sv", ".vhd", ".vhdl") -%} - prj_src add {{file|tcl_escape}} - {% endfor %} - prj_src add {{name}}.v - prj_impl option top {{name}} - prj_src add {{name}}.sdc - {{get_override("script_project")|default("# (script_project placeholder)")}} - prj_project save - prj_run Synthesis -impl impl - prj_run Translate -impl impl - prj_run Map -impl impl - prj_run PAR -impl impl - prj_run Export -impl impl -task Bitgen - {{get_override("script_after_export")|default("# (script_after_export placeholder)")}} - """, - "{{name}}.lpf": r""" - # {{autogenerated}} - BLOCK ASYNCPATHS; - BLOCK RESETPATHS; - {% for port_name, pin_name, extras in platform.iter_port_constraints_bits() -%} - LOCATE COMP "{{port_name}}" SITE "{{pin_name}}"; - IOBUF PORT "{{port_name}}" - {%- for key, value in extras.items() %} {{key}}={{value}}{% endfor %}; - {% endfor %} - {{get_override("add_preferences")|default("# (add_preferences placeholder)")}} - """, - "{{name}}.sdc": r""" - {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} - {% if port_signal is not none -%} - create_clock -name {{port_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_ports {{port_signal.name|tcl_escape}}] - {% else -%} - create_clock -name {{net_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_nets {{net_signal|hierarchy("/")|tcl_escape}}] - {% endif %} - {% endfor %} - {{get_override("add_constraints")|default("# (add_constraints placeholder)")}} - """, - } - 
_diamond_command_templates = [ - # These don't have any usable command-line option overrides. - r""" - {{invoke_tool("pnmainc")}} - {{name}}.tcl - """, - r""" - {{invoke_tool("ddtcmd")}} - -oft -bit - -if {{name}}_impl/{{name}}_impl.bit -of {{name}}.bit - """, - r""" - {{invoke_tool("ddtcmd")}} - -oft -svfsingle -revd -op "Fast Program" - -if {{name}}_impl/{{name}}_impl.bit -of {{name}}.svf - """, - ] - - # Common logic - - def __init__(self, *, toolchain="Trellis"): - super().__init__() - - assert toolchain in ("Trellis", "Diamond") - self.toolchain = toolchain - - @property - def required_tools(self): - if self.toolchain == "Trellis": - return self._trellis_required_tools - if self.toolchain == "Diamond": - return self._diamond_required_tools - assert False - - @property - def file_templates(self): - if self.toolchain == "Trellis": - return self._trellis_file_templates - if self.toolchain == "Diamond": - return self._diamond_file_templates - assert False - - @property - def command_templates(self): - if self.toolchain == "Trellis": - return self._trellis_command_templates - if self.toolchain == "Diamond": - return self._diamond_command_templates - assert False - - @property - def default_clk_constraint(self): - if self.default_clk == "OSCG": - return Clock(310e6 / self.oscg_div) - return super().default_clk_constraint - - def create_missing_domain(self, name): - # Lattice ECP5 devices have two global set/reset signals: PUR, which is driven at startup - # by the configuration logic and unconditionally resets every storage element, and GSR, - # which is driven by user logic and each storage element may be configured as affected or - # unaffected by GSR. PUR is purely asynchronous, so even though it is a low-skew global - # network, its deassertion may violate a setup/hold constraint with relation to a user - # clock. To avoid this, a GSR/SGSR instance should be driven synchronized to user clock. 
- if name == "sync" and self.default_clk is not None: - m = Module() - if self.default_clk == "OSCG": - if not hasattr(self, "oscg_div"): - raise ValueError("OSCG divider (oscg_div) must be an integer between 2 " - "and 128") - if not isinstance(self.oscg_div, int) or self.oscg_div < 2 or self.oscg_div > 128: - raise ValueError("OSCG divider (oscg_div) must be an integer between 2 " - "and 128, not {!r}" - .format(self.oscg_div)) - clk_i = Signal() - m.submodules += Instance("OSCG", p_DIV=self.oscg_div, o_OSC=clk_i) - else: - clk_i = self.request(self.default_clk).i - if self.default_rst is not None: - rst_i = self.request(self.default_rst).i - else: - rst_i = Const(0) - - gsr0 = Signal() - gsr1 = Signal() - # There is no end-of-startup signal on ECP5, but PUR is released after IOB enable, so - # a simple reset synchronizer (with PUR as the asynchronous reset) does the job. - m.submodules += [ - Instance("FD1S3AX", p_GSR="DISABLED", i_CK=clk_i, i_D=~rst_i, o_Q=gsr0), - Instance("FD1S3AX", p_GSR="DISABLED", i_CK=clk_i, i_D=gsr0, o_Q=gsr1), - # Although we already synchronize the reset input to user clock, SGSR has dedicated - # clock routing to the center of the FPGA; use that just in case it turns out to be - # more reliable. (None of this is documented.) - Instance("SGSR", i_CLK=clk_i, i_GSR=gsr1), - ] - # GSR implicitly connects to every appropriate storage element. As such, the sync - # domain is reset-less; domains driven by other clocks would need to have dedicated - # reset circuitry or otherwise meet setup/hold constraints on their own. 
- m.domains += ClockDomain("sync", reset_less=True) - m.d.comb += ClockSignal("sync").eq(clk_i) - return m - - _single_ended_io_types = [ - "HSUL12", "LVCMOS12", "LVCMOS15", "LVCMOS18", "LVCMOS25", "LVCMOS33", "LVTTL33", - "SSTL135_I", "SSTL135_II", "SSTL15_I", "SSTL15_II", "SSTL18_I", "SSTL18_II", - ] - _differential_io_types = [ - "BLVDS25", "BLVDS25E", "HSUL12D", "LVCMOS18D", "LVCMOS25D", "LVCMOS33D", - "LVDS", "LVDS25E", "LVPECL33", "LVPECL33E", "LVTTL33D", "MLVDS", "MLVDS25E", - "SLVS", "SSTL135D_I", "SSTL135D_II", "SSTL15D_I", "SSTL15D_II", "SSTL18D_I", - "SSTL18D_II", "SUBLVDS", - ] - - def should_skip_port_component(self, port, attrs, component): - # On ECP5, a differential IO is placed by only instantiating an IO buffer primitive at - # the PIOA or PIOC location, which is always the non-inverting pin. - if attrs.get("IO_TYPE", "LVCMOS25") in self._differential_io_types and component == "n": - return True - return False - - def _get_xdr_buffer(self, m, pin, *, i_invert=False, o_invert=False): - def get_ireg(clk, d, q): - for bit in range(len(q)): - m.submodules += Instance("IFS1P3DX", - i_SCLK=clk, - i_SP=Const(1), - i_CD=Const(0), - i_D=d[bit], - o_Q=q[bit] - ) - - def get_oreg(clk, d, q): - for bit in range(len(q)): - m.submodules += Instance("OFS1P3DX", - i_SCLK=clk, - i_SP=Const(1), - i_CD=Const(0), - i_D=d[bit], - o_Q=q[bit] - ) - - def get_oereg(clk, oe, q): - for bit in range(len(q)): - m.submodules += Instance("OFS1P3DX", - i_SCLK=clk, - i_SP=Const(1), - i_CD=Const(0), - i_D=oe, - o_Q=q[bit] - ) - - def get_iddr(sclk, d, q0, q1): - for bit in range(len(d)): - m.submodules += Instance("IDDRX1F", - i_SCLK=sclk, - i_RST=Const(0), - i_D=d[bit], - o_Q0=q0[bit], o_Q1=q1[bit] - ) - - def get_iddrx2(sclk, eclk, d, q0, q1, q2, q3): - for bit in range(len(d)): - m.submodules += Instance("IDDRX2F", - i_SCLK=sclk, - i_ECLK=eclk, - i_RST=Const(0), - i_D=d[bit], - o_Q0=q0[bit], o_Q1=q1[bit], o_Q2=q2[bit], o_Q3=q3[bit] - ) - - def get_iddr71b(sclk, eclk, d, q0, 
q1, q2, q3, q4, q5, q6): - for bit in range(len(d)): - m.submodules += Instance("IDDR71B", - i_SCLK=sclk, - i_ECLK=eclk, - i_RST=Const(0), - i_D=d[bit], - o_Q0=q0[bit], o_Q1=q1[bit], o_Q2=q2[bit], o_Q3=q3[bit], - o_Q4=q4[bit], o_Q5=q5[bit], o_Q6=q6[bit], - ) - - def get_oddr(sclk, d0, d1, q): - for bit in range(len(q)): - m.submodules += Instance("ODDRX1F", - i_SCLK=sclk, - i_RST=Const(0), - i_D0=d0[bit], i_D1=d1[bit], - o_Q=q[bit] - ) - - def get_oddrx2(sclk, eclk, d0, d1, d2, d3, q): - for bit in range(len(q)): - m.submodules += Instance("ODDRX2F", - i_SCLK=sclk, - i_ECLK=eclk, - i_RST=Const(0), - i_D0=d0[bit], i_D1=d1[bit], i_D2=d2[bit], i_D3=d3[bit], - o_Q=q[bit] - ) - - def get_oddr71b(sclk, eclk, d0, d1, d2, d3, d4, d5, d6, q): - for bit in range(len(q)): - m.submodules += Instance("ODDR71B", - i_SCLK=sclk, - i_ECLK=eclk, - i_RST=Const(0), - i_D0=d0[bit], i_D1=d1[bit], i_D2=d2[bit], i_D3=d3[bit], - i_D4=d4[bit], i_D5=d5[bit], i_D6=d6[bit], - o_Q=q[bit] - ) - - def get_ineg(z, invert): - if invert: - a = Signal.like(z, name_suffix="_n") - m.d.comb += z.eq(~a) - return a - else: - return z - - def get_oneg(a, invert): - if invert: - z = Signal.like(a, name_suffix="_n") - m.d.comb += z.eq(~a) - return z - else: - return a - - if "i" in pin.dir: - if pin.xdr < 2: - pin_i = get_ineg(pin.i, i_invert) - elif pin.xdr == 2: - pin_i0 = get_ineg(pin.i0, i_invert) - pin_i1 = get_ineg(pin.i1, i_invert) - elif pin.xdr == 4: - pin_i0 = get_ineg(pin.i0, i_invert) - pin_i1 = get_ineg(pin.i1, i_invert) - pin_i2 = get_ineg(pin.i2, i_invert) - pin_i3 = get_ineg(pin.i3, i_invert) - elif pin.xdr == 7: - pin_i0 = get_ineg(pin.i0, i_invert) - pin_i1 = get_ineg(pin.i1, i_invert) - pin_i2 = get_ineg(pin.i2, i_invert) - pin_i3 = get_ineg(pin.i3, i_invert) - pin_i4 = get_ineg(pin.i4, i_invert) - pin_i5 = get_ineg(pin.i5, i_invert) - pin_i6 = get_ineg(pin.i6, i_invert) - if "o" in pin.dir: - if pin.xdr < 2: - pin_o = get_oneg(pin.o, o_invert) - elif pin.xdr == 2: - pin_o0 = 
get_oneg(pin.o0, o_invert) - pin_o1 = get_oneg(pin.o1, o_invert) - elif pin.xdr == 4: - pin_o0 = get_oneg(pin.o0, o_invert) - pin_o1 = get_oneg(pin.o1, o_invert) - pin_o2 = get_oneg(pin.o2, o_invert) - pin_o3 = get_oneg(pin.o3, o_invert) - elif pin.xdr == 7: - pin_o0 = get_oneg(pin.o0, o_invert) - pin_o1 = get_oneg(pin.o1, o_invert) - pin_o2 = get_oneg(pin.o2, o_invert) - pin_o3 = get_oneg(pin.o3, o_invert) - pin_o4 = get_oneg(pin.o4, o_invert) - pin_o5 = get_oneg(pin.o5, o_invert) - pin_o6 = get_oneg(pin.o6, o_invert) - - i = o = t = None - if "i" in pin.dir: - i = Signal(pin.width, name="{}_xdr_i".format(pin.name)) - if "o" in pin.dir: - o = Signal(pin.width, name="{}_xdr_o".format(pin.name)) - if pin.dir in ("oe", "io"): - t = Signal(pin.width, name="{}_xdr_t".format(pin.name)) - - if pin.xdr == 0: - if "i" in pin.dir: - i = pin_i - if "o" in pin.dir: - o = pin_o - if pin.dir in ("oe", "io"): - t = Repl(~pin.oe, pin.width) - elif pin.xdr == 1: - if "i" in pin.dir: - get_ireg(pin.i_clk, i, pin_i) - if "o" in pin.dir: - get_oreg(pin.o_clk, pin_o, o) - if pin.dir in ("oe", "io"): - get_oereg(pin.o_clk, ~pin.oe, t) - elif pin.xdr == 2: - if "i" in pin.dir: - get_iddr(pin.i_clk, i, pin_i0, pin_i1) - if "o" in pin.dir: - get_oddr(pin.o_clk, pin_o0, pin_o1, o) - if pin.dir in ("oe", "io"): - get_oereg(pin.o_clk, ~pin.oe, t) - elif pin.xdr == 4: - if "i" in pin.dir: - get_iddrx2(pin.i_clk, pin.i_fclk, i, pin_i0, pin_i1, pin_i2, pin_i3) - if "o" in pin.dir: - get_oddrx2(pin.o_clk, pin.o_fclk, pin_o0, pin_o1, pin_o2, pin_o3, o) - if pin.dir in ("oe", "io"): - get_oereg(pin.o_clk, ~pin.oe, t) - elif pin.xdr == 7: - if "i" in pin.dir: - get_iddr71b(pin.i_clk, pin.i_fclk, i, pin_i0, pin_i1, pin_i2, pin_i3, pin_i4, pin_i5, pin_i6) - if "o" in pin.dir: - get_oddr71b(pin.o_clk, pin.o_fclk, pin_o0, pin_o1, pin_o2, pin_o3, pin_o4, pin_o5, pin_o6, o) - if pin.dir in ("oe", "io"): - get_oereg(pin.o_clk, ~pin.oe, t) - else: - assert False - - return (i, o, t) - - def get_input(self, 
pin, port, attrs, invert): - self._check_feature("single-ended input", pin, attrs, - valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("IB", - i_I=port.io[bit], - o_O=i[bit] - ) - return m - - def get_output(self, pin, port, attrs, invert): - self._check_feature("single-ended output", pin, attrs, - valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("OB", - i_I=o[bit], - o_O=port.io[bit] - ) - return m - - def get_tristate(self, pin, port, attrs, invert): - self._check_feature("single-ended tristate", pin, attrs, - valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBZ", - i_T=t[bit], - i_I=o[bit], - o_O=port.io[bit] - ) - return m - - def get_input_output(self, pin, port, attrs, invert): - self._check_feature("single-ended input/output", pin, attrs, - valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert, o_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("BB", - i_T=t[bit], - i_I=o[bit], - o_O=i[bit], - io_B=port.io[bit] - ) - return m - - def get_diff_input(self, pin, port, attrs, invert): - self._check_feature("differential input", pin, attrs, - valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("IB", - i_I=port.p[bit], - o_O=i[bit] - ) - return m - - def get_diff_output(self, pin, port, attrs, invert): - 
self._check_feature("differential output", pin, attrs, - valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("OB", - i_I=o[bit], - o_O=port.p[bit], - ) - return m - - def get_diff_tristate(self, pin, port, attrs, invert): - self._check_feature("differential tristate", pin, attrs, - valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBZ", - i_T=t[bit], - i_I=o[bit], - o_O=port.p[bit], - ) - return m - - def get_diff_input_output(self, pin, port, attrs, invert): - self._check_feature("differential input/output", pin, attrs, - valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert, o_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("BB", - i_T=t[bit], - i_I=o[bit], - o_O=i[bit], - io_B=port.p[bit], - ) - return m - - # CDC primitives are not currently specialized for ECP5. - # While Diamond supports false path constraints; nextpnr-ecp5 does not. 
+import warnings +warnings.warn("instead of nmigen.vendor.lattice_ecp5, use amaranth.vendor.lattice_ecp5", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/vendor/lattice_ice40.py b/nmigen/vendor/lattice_ice40.py index c9c35b3..a4f142e 100644 --- a/nmigen/vendor/lattice_ice40.py +++ b/nmigen/vendor/lattice_ice40.py @@ -1,627 +1,7 @@ -from abc import abstractproperty - -from ..hdl import * -from ..lib.cdc import ResetSynchronizer -from ..build import * +from amaranth.vendor.lattice_ice40 import * +from amaranth.vendor.lattice_ice40 import __all__ -__all__ = ["LatticeICE40Platform"] - - -class LatticeICE40Platform(TemplatedPlatform): - """ - IceStorm toolchain - ------------------ - - Required tools: - * ``yosys`` - * ``nextpnr-ice40`` - * ``icepack`` - - The environment is populated by running the script specified in the environment variable - ``NMIGEN_ENV_IceStorm``, if present. - - Available overrides: - * ``verbose``: enables logging of informational messages to standard error. - * ``read_verilog_opts``: adds options for ``read_verilog`` Yosys command. - * ``synth_opts``: adds options for ``synth_ice40`` Yosys command. - * ``script_after_read``: inserts commands after ``read_ilang`` in Yosys script. - * ``script_after_synth``: inserts commands after ``synth_ice40`` in Yosys script. - * ``yosys_opts``: adds extra options for ``yosys``. - * ``nextpnr_opts``: adds extra options for ``nextpnr-ice40``. - * ``add_pre_pack``: inserts commands at the end in pre-pack Python script. - * ``add_constraints``: inserts commands at the end in the PCF file. - - Build products: - * ``{{name}}.rpt``: Yosys log. - * ``{{name}}.json``: synthesized RTL. - * ``{{name}}.tim``: nextpnr log. - * ``{{name}}.asc``: ASCII bitstream. - * ``{{name}}.bin``: binary bitstream. - - iCECube2 toolchain - ------------------ - - This toolchain comes in two variants: ``LSE-iCECube2`` and ``Synplify-iCECube2``. 
- - Required tools: - * iCECube2 toolchain - * ``tclsh`` - - The environment is populated by setting the necessary environment variables based on - ``NMIGEN_ENV_iCECube2``, which must point to the root of the iCECube2 installation, and - is required. - - Available overrides: - * ``verbose``: enables logging of informational messages to standard error. - * ``lse_opts``: adds options for LSE. - * ``script_after_add``: inserts commands after ``add_file`` in Synplify Tcl script. - * ``script_after_options``: inserts commands after ``set_option`` in Synplify Tcl script. - * ``add_constraints``: inserts commands in SDC file. - * ``script_after_flow``: inserts commands after ``run_sbt_backend_auto`` in SBT - Tcl script. - - Build products: - * ``{{name}}_lse.log`` (LSE) or ``{{name}}_design/{{name}}.htm`` (Synplify): synthesis log. - * ``sbt/outputs/router/{{name}}_timing.rpt``: timing report. - * ``{{name}}.edf``: EDIF netlist. - * ``{{name}}.bin``: binary bitstream. - """ - - toolchain = None # selected when creating platform - - device = abstractproperty() - package = abstractproperty() - - # IceStorm templates - - _nextpnr_device_options = { - "iCE40LP384": "--lp384", - "iCE40LP1K": "--lp1k", - "iCE40LP4K": "--lp8k", - "iCE40LP8K": "--lp8k", - "iCE40HX1K": "--hx1k", - "iCE40HX4K": "--hx8k", - "iCE40HX8K": "--hx8k", - "iCE40UP5K": "--up5k", - "iCE40UP3K": "--up5k", - "iCE5LP4K": "--u4k", - "iCE5LP2K": "--u4k", - "iCE5LP1K": "--u4k", - } - _nextpnr_package_options = { - "iCE40LP4K": ":4k", - "iCE40HX4K": ":4k", - "iCE40UP3K": "", - "iCE5LP2K": "", - "iCE5LP1K": "", - } - - _icestorm_required_tools = [ - "yosys", - "nextpnr-ice40", - "icepack", - ] - _icestorm_file_templates = { - **TemplatedPlatform.build_script_templates, - "{{name}}.il": r""" - # {{autogenerated}} - {{emit_rtlil()}} - """, - "{{name}}.debug.v": r""" - /* {{autogenerated}} */ - {{emit_debug_verilog()}} - """, - "{{name}}.ys": r""" - # {{autogenerated}} - {% for file in platform.iter_files(".v") -%} - 
read_verilog {{get_override("read_verilog_opts")|options}} {{file}} - {% endfor %} - {% for file in platform.iter_files(".sv") -%} - read_verilog -sv {{get_override("read_verilog_opts")|options}} {{file}} - {% endfor %} - {% for file in platform.iter_files(".il") -%} - read_ilang {{file}} - {% endfor %} - read_ilang {{name}}.il - delete w:$verilog_initial_trigger - {{get_override("script_after_read")|default("# (script_after_read placeholder)")}} - synth_ice40 {{get_override("synth_opts")|options}} -top {{name}} - {{get_override("script_after_synth")|default("# (script_after_synth placeholder)")}} - write_json {{name}}.json - """, - "{{name}}.pcf": r""" - # {{autogenerated}} - {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} - set_io {{port_name}} {{pin_name}} - {% endfor %} - {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} - set_frequency {{net_signal|hierarchy(".")}} {{frequency/1000000}} - {% endfor%} - {{get_override("add_constraints")|default("# (add_constraints placeholder)")}} - """, - } - _icestorm_command_templates = [ - r""" - {{invoke_tool("yosys")}} - {{quiet("-q")}} - {{get_override("yosys_opts")|options}} - -l {{name}}.rpt - {{name}}.ys - """, - r""" - {{invoke_tool("nextpnr-ice40")}} - {{quiet("--quiet")}} - {{get_override("nextpnr_opts")|options}} - --log {{name}}.tim - {{platform._nextpnr_device_options[platform.device]}} - --package - {{platform.package|lower}}{{platform._nextpnr_package_options[platform.device]| - default("")}} - --json {{name}}.json - --pcf {{name}}.pcf - --asc {{name}}.asc - """, - r""" - {{invoke_tool("icepack")}} - {{verbose("-v")}} - {{name}}.asc - {{name}}.bin - """ - ] - - # iCECube2 templates - - _icecube2_required_tools = [ - "synthesis", - "synpwrap", - "tclsh", - ] - _icecube2_file_templates = { - **TemplatedPlatform.build_script_templates, - "build_{{name}}.sh": r""" - # {{autogenerated}} - set -e{{verbose("x")}} - if [ -n 
"${{platform._toolchain_env_var}}" ]; then - # LSE environment - export LD_LIBRARY_PATH=${{platform._toolchain_env_var}}/LSE/bin/lin64:$LD_LIBRARY_PATH - export PATH=${{platform._toolchain_env_var}}/LSE/bin/lin64:$PATH - export FOUNDRY=${{platform._toolchain_env_var}}/LSE - # Synplify environment - export LD_LIBRARY_PATH=${{platform._toolchain_env_var}}/sbt_backend/bin/linux/opt/synpwrap:$LD_LIBRARY_PATH - export PATH=${{platform._toolchain_env_var}}/sbt_backend/bin/linux/opt/synpwrap:$PATH - export SYNPLIFY_PATH=${{platform._toolchain_env_var}}/synpbase - # Common environment - export SBT_DIR=${{platform._toolchain_env_var}}/sbt_backend - else - echo "Variable ${{platform._toolchain_env_var}} must be set" >&2; exit 1 - fi - {{emit_commands("sh")}} - """, - "{{name}}.v": r""" - /* {{autogenerated}} */ - {{emit_verilog()}} - """, - "{{name}}.debug.v": r""" - /* {{autogenerated}} */ - {{emit_debug_verilog()}} - """, - "{{name}}_lse.prj": r""" - # {{autogenerated}} - -a SBT{{platform.family}} - -d {{platform.device}} - -t {{platform.package}} - {{get_override("lse_opts")|options|default("# (lse_opts placeholder)")}} - {% for file in platform.iter_files(".v") -%} - -ver {{file}} - {% endfor %} - -ver {{name}}.v - -sdc {{name}}.sdc - -top {{name}} - -output_edif {{name}}.edf - -logfile {{name}}_lse.log - """, - "{{name}}_syn.prj": r""" - # {{autogenerated}} - {% for file in platform.iter_files(".v", ".sv", ".vhd", ".vhdl") -%} - add_file -verilog {{file|tcl_escape}} - {% endfor %} - add_file -verilog {{name}}.v - add_file -constraint {{name}}.sdc - {{get_override("script_after_add")|default("# (script_after_add placeholder)")}} - impl -add {{name}}_design -type fpga - set_option -technology SBT{{platform.family}} - set_option -part {{platform.device}} - set_option -package {{platform.package}} - {{get_override("script_after_options")|default("# (script_after_options placeholder)")}} - project -result_format edif - project -result_file {{name}}.edf - impl -active 
{{name}}_design - project -run compile - project -run map - project -run fpga_mapper - file copy -force -- {{name}}_design/{{name}}.edf {{name}}.edf - """, - "{{name}}.sdc": r""" - # {{autogenerated}} - {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} - {% if port_signal is not none -%} - create_clock -name {{port_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_ports {{port_signal.name|tcl_escape}}] - {% else -%} - create_clock -name {{net_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_nets {{net_signal|hierarchy("/")|tcl_escape}}] - {% endif %} - {% endfor %} - {{get_override("add_constraints")|default("# (add_constraints placeholder)")}} - """, - "{{name}}.tcl": r""" - # {{autogenerated}} - set device {{platform.device}}-{{platform.package}} - set top_module {{name}} - set proj_dir . - set output_dir . - set edif_file {{name}} - set tool_options ":edifparser -y {{name}}.pcf" - set sbt_root $::env(SBT_DIR) - append sbt_tcl $sbt_root "/tcl/sbt_backend_synpl.tcl" - source $sbt_tcl - run_sbt_backend_auto $device $top_module $proj_dir $output_dir $tool_options $edif_file - {{get_override("script_after_file")|default("# (script_after_file placeholder)")}} - file copy -force -- sbt/outputs/bitmap/{{name}}_bitmap.bin {{name}}.bin - exit - """, - "{{name}}.pcf": r""" - # {{autogenerated}} - {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} - set_io {{port_name}} {{pin_name}} - {% endfor %} - """, - } - _lse_icecube2_command_templates = [ - r"""synthesis -f {{name}}_lse.prj""", - r"""tclsh {{name}}.tcl""", - ] - _synplify_icecube2_command_templates = [ - r"""synpwrap -prj {{name}}_syn.prj -log {{name}}_syn.log""", - r"""tclsh {{name}}.tcl""", - ] - - # Common logic - - def __init__(self, *, toolchain="IceStorm"): - super().__init__() - - assert toolchain in ("IceStorm", "LSE-iCECube2", "Synplify-iCECube2") - self.toolchain = toolchain - - @property - def family(self): - if 
self.device.startswith("iCE40"): - return "iCE40" - if self.device.startswith("iCE5"): - return "iCE5" - assert False - - @property - def _toolchain_env_var(self): - if self.toolchain == "IceStorm": - return f"NMIGEN_ENV_{self.toolchain}" - if self.toolchain in ("LSE-iCECube2", "Synplify-iCECube2"): - return f"NMIGEN_ENV_iCECube2" - assert False - - @property - def required_tools(self): - if self.toolchain == "IceStorm": - return self._icestorm_required_tools - if self.toolchain in ("LSE-iCECube2", "Synplify-iCECube2"): - return self._icecube2_required_tools - assert False - - @property - def file_templates(self): - if self.toolchain == "IceStorm": - return self._icestorm_file_templates - if self.toolchain in ("LSE-iCECube2", "Synplify-iCECube2"): - return self._icecube2_file_templates - assert False - - @property - def command_templates(self): - if self.toolchain == "IceStorm": - return self._icestorm_command_templates - if self.toolchain == "LSE-iCECube2": - return self._lse_icecube2_command_templates - if self.toolchain == "Synplify-iCECube2": - return self._synplify_icecube2_command_templates - assert False - - @property - def default_clk_constraint(self): - # Internal high-speed oscillator: 48 MHz / (2 ^ div) - if self.default_clk == "SB_HFOSC": - return Clock(48e6 / 2 ** self.hfosc_div) - # Internal low-speed oscillator: 10 KHz - elif self.default_clk == "SB_LFOSC": - return Clock(10e3) - # Otherwise, use the defined Clock resource. - return super().default_clk_constraint - - def create_missing_domain(self, name): - # For unknown reasons (no errata was ever published, and no documentation mentions this - # issue), iCE40 BRAMs read as zeroes for ~3 us after configuration and release of internal - # global reset. Note that this is a *time-based* delay, generated purely by the internal - # oscillator, which may not be observed nor influenced directly. 
For details, see links: - # * https://github.com/cliffordwolf/icestorm/issues/76#issuecomment-289270411 - # * https://github.com/cliffordwolf/icotools/issues/2#issuecomment-299734673 - # - # To handle this, it is necessary to have a global reset in any iCE40 design that may - # potentially instantiate BRAMs, and assert this reset for >3 us after configuration. - # (We add a margin of 5x to allow for PVT variation.) If the board includes a dedicated - # reset line, this line is ORed with the power on reset. - # - # If an internal oscillator is selected as the default clock source, the power-on-reset - # delay is increased to 100 us, since the oscillators are only stable after that long. - # - # The power-on reset timer counts up because the vendor tools do not support initialization - # of flip-flops. - if name == "sync" and self.default_clk is not None: - m = Module() - - # Internal high-speed clock: 6 MHz, 12 MHz, 24 MHz, or 48 MHz depending on the divider. - if self.default_clk == "SB_HFOSC": - if not hasattr(self, "hfosc_div"): - raise ValueError("SB_HFOSC divider exponent (hfosc_div) must be an integer " - "between 0 and 3") - if not isinstance(self.hfosc_div, int) or self.hfosc_div < 0 or self.hfosc_div > 3: - raise ValueError("SB_HFOSC divider exponent (hfosc_div) must be an integer " - "between 0 and 3, not {!r}" - .format(self.hfosc_div)) - clk_i = Signal() - m.submodules += Instance("SB_HFOSC", - i_CLKHFEN=1, - i_CLKHFPU=1, - p_CLKHF_DIV="0b{0:02b}".format(self.hfosc_div), - o_CLKHF=clk_i) - delay = int(100e-6 * self.default_clk_frequency) - # Internal low-speed clock: 10 KHz. - elif self.default_clk == "SB_LFOSC": - clk_i = Signal() - m.submodules += Instance("SB_LFOSC", - i_CLKLFEN=1, - i_CLKLFPU=1, - o_CLKLF=clk_i) - delay = int(100e-6 * self.default_clk_frequency) - # User-defined clock signal. 
- else: - clk_i = self.request(self.default_clk).i - delay = int(15e-6 * self.default_clk_frequency) - - if self.default_rst is not None: - rst_i = self.request(self.default_rst).i - else: - rst_i = Const(0) - - # Power-on-reset domain - m.domains += ClockDomain("por", reset_less=True, local=True) - timer = Signal(range(delay)) - ready = Signal() - m.d.comb += ClockSignal("por").eq(clk_i) - with m.If(timer == delay): - m.d.por += ready.eq(1) - with m.Else(): - m.d.por += timer.eq(timer + 1) - - # Primary domain - m.domains += ClockDomain("sync") - m.d.comb += ClockSignal("sync").eq(clk_i) - if self.default_rst is not None: - m.submodules.reset_sync = ResetSynchronizer(~ready | rst_i, domain="sync") - else: - m.d.comb += ResetSignal("sync").eq(~ready) - - return m - - def should_skip_port_component(self, port, attrs, component): - # On iCE40, a differential input is placed by only instantiating an SB_IO primitive for - # the pin with z=0, which is the non-inverting pin. The pinout unfortunately differs - # between LP/HX and UP series: - # * for LP/HX, z=0 is DPxxB (B is non-inverting, A is inverting) - # * for UP, z=0 is IOB_xxA (A is non-inverting, B is inverting) - if attrs.get("IO_STANDARD", "SB_LVCMOS") == "SB_LVDS_INPUT" and component == "n": - return True - return False - - def _get_io_buffer(self, m, pin, port, attrs, *, i_invert=False, o_invert=False, - invert_lut=False): - def get_dff(clk, d, q): - m.submodules += Instance("$dff", - p_CLK_POLARITY=1, - p_WIDTH=len(d), - i_CLK=clk, - i_D=d, - o_Q=q) - - def get_ineg(y, invert): - if invert_lut: - a = Signal.like(y, name_suffix="_x{}".format(1 if invert else 0)) - for bit in range(len(y)): - m.submodules += Instance("SB_LUT4", - p_LUT_INIT=Const(0b01 if invert else 0b10, 16), - i_I0=a[bit], - i_I1=Const(0), - i_I2=Const(0), - i_I3=Const(0), - o_O=y[bit]) - return a - elif invert: - a = Signal.like(y, name_suffix="_n") - m.d.comb += y.eq(~a) - return a - else: - return y - - def get_oneg(a, invert): - if 
invert_lut: - y = Signal.like(a, name_suffix="_x{}".format(1 if invert else 0)) - for bit in range(len(a)): - m.submodules += Instance("SB_LUT4", - p_LUT_INIT=Const(0b01 if invert else 0b10, 16), - i_I0=a[bit], - i_I1=Const(0), - i_I2=Const(0), - i_I3=Const(0), - o_O=y[bit]) - return y - elif invert: - y = Signal.like(a, name_suffix="_n") - m.d.comb += y.eq(~a) - return y - else: - return a - - if "GLOBAL" in attrs: - is_global_input = bool(attrs["GLOBAL"]) - del attrs["GLOBAL"] - else: - is_global_input = False - assert not (is_global_input and i_invert) - - if "i" in pin.dir: - if pin.xdr < 2: - pin_i = get_ineg(pin.i, i_invert) - elif pin.xdr == 2: - pin_i0 = get_ineg(pin.i0, i_invert) - pin_i1 = get_ineg(pin.i1, i_invert) - if "o" in pin.dir: - if pin.xdr < 2: - pin_o = get_oneg(pin.o, o_invert) - elif pin.xdr == 2: - pin_o0 = get_oneg(pin.o0, o_invert) - pin_o1 = get_oneg(pin.o1, o_invert) - - if "i" in pin.dir and pin.xdr == 2: - i0_ff = Signal.like(pin_i0, name_suffix="_ff") - i1_ff = Signal.like(pin_i1, name_suffix="_ff") - get_dff(pin.i_clk, i0_ff, pin_i0) - get_dff(pin.i_clk, i1_ff, pin_i1) - if "o" in pin.dir and pin.xdr == 2: - o1_ff = Signal.like(pin_o1, name_suffix="_ff") - get_dff(pin.o_clk, pin_o1, o1_ff) - - for bit in range(len(port)): - io_args = [ - ("io", "PACKAGE_PIN", port[bit]), - *(("p", key, value) for key, value in attrs.items()), - ] - - if "i" not in pin.dir: - # If no input pin is requested, it is important to use a non-registered input pin - # type, because an output-only pin would not have an input clock, and if its input - # is configured as registered, this would prevent a co-located input-capable pin - # from using an input clock. 
- i_type = 0b01 # PIN_INPUT - elif pin.xdr == 0: - i_type = 0b01 # PIN_INPUT - elif pin.xdr > 0: - i_type = 0b00 # PIN_INPUT_REGISTERED aka PIN_INPUT_DDR - if "o" not in pin.dir: - o_type = 0b0000 # PIN_NO_OUTPUT - elif pin.xdr == 0 and pin.dir == "o": - o_type = 0b0110 # PIN_OUTPUT - elif pin.xdr == 0: - o_type = 0b1010 # PIN_OUTPUT_TRISTATE - elif pin.xdr == 1 and pin.dir == "o": - o_type = 0b0101 # PIN_OUTPUT_REGISTERED - elif pin.xdr == 1: - o_type = 0b1101 # PIN_OUTPUT_REGISTERED_ENABLE_REGISTERED - elif pin.xdr == 2 and pin.dir == "o": - o_type = 0b0100 # PIN_OUTPUT_DDR - elif pin.xdr == 2: - o_type = 0b1100 # PIN_OUTPUT_DDR_ENABLE_REGISTERED - io_args.append(("p", "PIN_TYPE", C((o_type << 2) | i_type, 6))) - - if hasattr(pin, "i_clk"): - io_args.append(("i", "INPUT_CLK", pin.i_clk)) - if hasattr(pin, "o_clk"): - io_args.append(("i", "OUTPUT_CLK", pin.o_clk)) - - if "i" in pin.dir: - if pin.xdr == 0 and is_global_input: - io_args.append(("o", "GLOBAL_BUFFER_OUTPUT", pin.i[bit])) - elif pin.xdr < 2: - io_args.append(("o", "D_IN_0", pin_i[bit])) - elif pin.xdr == 2: - # Re-register both inputs before they enter fabric. This increases hold time - # to an entire cycle, and adds one cycle of latency. - io_args.append(("o", "D_IN_0", i0_ff[bit])) - io_args.append(("o", "D_IN_1", i1_ff[bit])) - if "o" in pin.dir: - if pin.xdr < 2: - io_args.append(("i", "D_OUT_0", pin_o[bit])) - elif pin.xdr == 2: - # Re-register negedge output after it leaves fabric. This increases setup time - # to an entire cycle, and doesn't add latency. 
- io_args.append(("i", "D_OUT_0", pin_o0[bit])) - io_args.append(("i", "D_OUT_1", o1_ff[bit])) - - if pin.dir in ("oe", "io"): - io_args.append(("i", "OUTPUT_ENABLE", pin.oe)) - - if is_global_input: - m.submodules["{}_{}".format(pin.name, bit)] = Instance("SB_GB_IO", *io_args) - else: - m.submodules["{}_{}".format(pin.name, bit)] = Instance("SB_IO", *io_args) - - def get_input(self, pin, port, attrs, invert): - self._check_feature("single-ended input", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - m = Module() - self._get_io_buffer(m, pin, port.io, attrs, i_invert=invert) - return m - - def get_output(self, pin, port, attrs, invert): - self._check_feature("single-ended output", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - m = Module() - self._get_io_buffer(m, pin, port.io, attrs, o_invert=invert) - return m - - def get_tristate(self, pin, port, attrs, invert): - self._check_feature("single-ended tristate", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - m = Module() - self._get_io_buffer(m, pin, port.io, attrs, o_invert=invert) - return m - - def get_input_output(self, pin, port, attrs, invert): - self._check_feature("single-ended input/output", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - m = Module() - self._get_io_buffer(m, pin, port.io, attrs, i_invert=invert, o_invert=invert) - return m - - def get_diff_input(self, pin, port, attrs, invert): - self._check_feature("differential input", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - m = Module() - # See comment in should_skip_port_component above. - self._get_io_buffer(m, pin, port.p, attrs, i_invert=invert) - return m - - def get_diff_output(self, pin, port, attrs, invert): - self._check_feature("differential output", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - m = Module() - # Note that the non-inverting output pin is not driven the same way as a regular - # output pin. 
The inverter introduces a delay, so for a non-inverting output pin, - # an identical delay is introduced by instantiating a LUT. This makes the waveform - # perfectly symmetric in the xdr=0 case. - self._get_io_buffer(m, pin, port.p, attrs, o_invert= invert, invert_lut=True) - self._get_io_buffer(m, pin, port.n, attrs, o_invert=not invert, invert_lut=True) - return m - - # Tristate bidirectional buffers are not supported on iCE40 because it requires external - # termination, which is different for differential pins configured as inputs and outputs. - - # CDC primitives are not currently specialized for iCE40. It is not known if iCECube2 supports - # the necessary attributes; nextpnr-ice40 does not. +import warnings +warnings.warn("instead of nmigen.vendor.lattice_ice40, use amaranth.vendor.lattice_ice40", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/vendor/lattice_machxo2.py b/nmigen/vendor/lattice_machxo2.py index b9fadab..9752407 100644 --- a/nmigen/vendor/lattice_machxo2.py +++ b/nmigen/vendor/lattice_machxo2.py @@ -1,11 +1,7 @@ +from amaranth.vendor.lattice_machxo2 import * +from amaranth.vendor.lattice_machxo2 import __all__ + + import warnings - -from .lattice_machxo_2_3l import LatticeMachXO2Platform - - -__all__ = ["LatticeMachXO2Platform"] - - -# TODO(nmigen-0.4): remove -warnings.warn("instead of nmigen.vendor.lattice_machxo2, use nmigen.vendor.lattice_machxo_2_3l", +warnings.warn("instead of nmigen.vendor.lattice_machxo2, use amaranth.vendor.lattice_machxo2", DeprecationWarning, stacklevel=2) diff --git a/nmigen/vendor/lattice_machxo_2_3l.py b/nmigen/vendor/lattice_machxo_2_3l.py index 1e2b90f..5275a0a 100644 --- a/nmigen/vendor/lattice_machxo_2_3l.py +++ b/nmigen/vendor/lattice_machxo_2_3l.py @@ -1,421 +1,7 @@ -from abc import abstractproperty - -from ..hdl import * -from ..build import * +from amaranth.vendor.lattice_machxo_2_3l import * +from amaranth.vendor.lattice_machxo_2_3l import __all__ -__all__ = ["LatticeMachXO2Platform", 
"LatticeMachXO3LPlatform"] - - -# MachXO2 and MachXO3L primitives are the same. Handle both using -# one class and expose user-aliases for convenience. -class LatticeMachXO2Or3LPlatform(TemplatedPlatform): - """ - Required tools: - * ``pnmainc`` - * ``ddtcmd`` - - The environment is populated by running the script specified in the environment variable - ``NMIGEN_ENV_Diamond``, if present. On Linux, diamond_env as provided by Diamond - itself is a good candidate. On Windows, the following script (named ``diamond_env.bat``, - for instance) is known to work:: - - @echo off - set PATH=C:\\lscc\\diamond\\%DIAMOND_VERSION%\\bin\\nt64;%PATH% - - Available overrides: - * ``script_project``: inserts commands before ``prj_project save`` in Tcl script. - * ``script_after_export``: inserts commands after ``prj_run Export`` in Tcl script. - * ``add_preferences``: inserts commands at the end of the LPF file. - * ``add_constraints``: inserts commands at the end of the XDC file. - - Build products: - * ``{{name}}_impl/{{name}}_impl.htm``: consolidated log. - * ``{{name}}.jed``: JEDEC fuse file. - * ``{{name}}.bit``: binary bitstream. - * ``{{name}}.svf``: JTAG programming vector for FLASH programming. - * ``{{name}}_flash.svf``: JTAG programming vector for FLASH programming. - * ``{{name}}_sram.svf``: JTAG programming vector for SRAM programming. - """ - - toolchain = "Diamond" - - device = abstractproperty() - package = abstractproperty() - speed = abstractproperty() - grade = "C" # [C]ommercial, [I]ndustrial - - required_tools = [ - "pnmainc", - "ddtcmd" - ] - file_templates = { - **TemplatedPlatform.build_script_templates, - "build_{{name}}.sh": r""" - # {{autogenerated}} - set -e{{verbose("x")}} - if [ -z "$BASH" ] ; then exec /bin/bash "$0" "$@"; fi - if [ -n "${{platform._toolchain_env_var}}" ]; then - bindir=$(dirname "${{platform._toolchain_env_var}}") - . 
"${{platform._toolchain_env_var}}" - fi - {{emit_commands("sh")}} - """, - "{{name}}.v": r""" - /* {{autogenerated}} */ - {{emit_verilog()}} - """, - "{{name}}.debug.v": r""" - /* {{autogenerated}} */ - {{emit_debug_verilog()}} - """, - "{{name}}.tcl": r""" - prj_project new -name {{name}} -impl impl -impl_dir {{name}}_impl \ - -dev {{platform.device}}-{{platform.speed}}{{platform.package}}{{platform.grade}} \ - -lpf {{name}}.lpf \ - -synthesis synplify - {% for file in platform.iter_files(".v", ".sv", ".vhd", ".vhdl") -%} - prj_src add {{file|tcl_escape}} - {% endfor %} - prj_src add {{name}}.v - prj_impl option top {{name}} - prj_src add {{name}}.sdc - {{get_override("script_project")|default("# (script_project placeholder)")}} - prj_project save - prj_run Synthesis -impl impl - prj_run Translate -impl impl - prj_run Map -impl impl - prj_run PAR -impl impl - prj_run Export -impl impl -task Bitgen - prj_run Export -impl impl -task Jedecgen - {{get_override("script_after_export")|default("# (script_after_export placeholder)")}} - """, - "{{name}}.lpf": r""" - # {{autogenerated}} - BLOCK ASYNCPATHS; - BLOCK RESETPATHS; - {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} - LOCATE COMP "{{port_name}}" SITE "{{pin_name}}"; - {% if attrs -%} - IOBUF PORT "{{port_name}}" - {%- for key, value in attrs.items() %} {{key}}={{value}}{% endfor %}; - {% endif %} - {% endfor %} - {{get_override("add_preferences")|default("# (add_preferences placeholder)")}} - """, - "{{name}}.sdc": r""" - {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} - {% if port_signal is not none -%} - create_clock -name {{port_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_ports {{port_signal.name|tcl_escape}}] - {% else -%} - create_clock -name {{net_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_nets {{net_signal|hierarchy("/")|tcl_escape}}] - {% endif %} - {% endfor %} - 
{{get_override("add_constraints")|default("# (add_constraints placeholder)")}} - """, - } - command_templates = [ - # These don't have any usable command-line option overrides. - r""" - {{invoke_tool("pnmainc")}} - {{name}}.tcl - """, - r""" - {{invoke_tool("ddtcmd")}} - -oft -bit - -if {{name}}_impl/{{name}}_impl.bit -of {{name}}.bit - """, - r""" - {{invoke_tool("ddtcmd")}} - -oft -jed - -dev {{platform.device}}-{{platform.speed}}{{platform.package}}{{platform.grade}} - -if {{name}}_impl/{{name}}_impl.jed -of {{name}}.jed - """, - r""" - {{invoke_tool("ddtcmd")}} - -oft -svfsingle -revd -op "FLASH Erase,Program,Verify" - -if {{name}}_impl/{{name}}_impl.jed -of {{name}}_flash.svf - """, - # TODO(nmigen-0.4): remove - r""" - {% if syntax == "bat" -%} - copy {{name}}_flash.svf {{name}}.svf - {% else -%} - cp {{name}}_flash.svf {{name}}.svf - {% endif %} - """, - r""" - {{invoke_tool("ddtcmd")}} - -oft -svfsingle -revd -op "SRAM Fast Program" - -if {{name}}_impl/{{name}}_impl.bit -of {{name}}_sram.svf - """, - ] - - def create_missing_domain(self, name): - # Lattice MachXO2/MachXO3L devices have two global set/reset signals: PUR, which is driven at - # startup by the configuration logic and unconditionally resets every storage element, - # and GSR, which is driven by user logic and each storage element may be configured as - # affected or unaffected by GSR. PUR is purely asynchronous, so even though it is - # a low-skew global network, its deassertion may violate a setup/hold constraint with - # relation to a user clock. To avoid this, a GSR/SGSR instance should be driven - # synchronized to user clock. 
- if name == "sync" and self.default_clk is not None: - clk_i = self.request(self.default_clk).i - if self.default_rst is not None: - rst_i = self.request(self.default_rst).i - else: - rst_i = Const(0) - - gsr0 = Signal() - gsr1 = Signal() - m = Module() - # There is no end-of-startup signal on MachXO2/MachXO3L, but PUR is released after IOB - # enable, so a simple reset synchronizer (with PUR as the asynchronous reset) does the job. - m.submodules += [ - Instance("FD1S3AX", p_GSR="DISABLED", i_CK=clk_i, i_D=~rst_i, o_Q=gsr0), - Instance("FD1S3AX", p_GSR="DISABLED", i_CK=clk_i, i_D=gsr0, o_Q=gsr1), - # Although we already synchronize the reset input to user clock, SGSR has dedicated - # clock routing to the center of the FPGA; use that just in case it turns out to be - # more reliable. (None of this is documented.) - Instance("SGSR", i_CLK=clk_i, i_GSR=gsr1), - ] - # GSR implicitly connects to every appropriate storage element. As such, the sync - # domain is reset-less; domains driven by other clocks would need to have dedicated - # reset circuitry or otherwise meet setup/hold constraints on their own. 
- m.domains += ClockDomain("sync", reset_less=True) - m.d.comb += ClockSignal("sync").eq(clk_i) - return m - - _single_ended_io_types = [ - "PCI33", "LVTTL33", "LVCMOS33", "LVCMOS25", "LVCMOS18", "LVCMOS15", "LVCMOS12", - "LVCMOS25R33", "LVCMOS18R33", "LVCMOS18R25", "LVCMOS15R33", "LVCMOS15R25", "LVCMOS12R33", - "LVCMOS12R25", "LVCMOS10R33", "LVCMOS10R25", "SSTL25_I", "SSTL25_II", "SSTL18_I", - "SSTL18_II", "HSTL18_I", "HSTL18_II", - ] - _differential_io_types = [ - "LVDS25", "LVDS25E", "RSDS25", "RSDS25E", "BLVDS25", "BLVDS25E", "MLVDS25", "MLVDS25E", - "LVPECL33", "LVPECL33E", "SSTL25D_I", "SSTL25D_II", "SSTL18D_I", "SSTL18D_II", - "HSTL18D_I", "HSTL18D_II", "LVTTL33D", "LVCMOS33D", "LVCMOS25D", "LVCMOS18D", "LVCMOS15D", - "LVCMOS12D", "MIPI", - ] - - def should_skip_port_component(self, port, attrs, component): - # On ECP5, a differential IO is placed by only instantiating an IO buffer primitive at - # the PIOA or PIOC location, which is always the non-inverting pin. - if attrs.get("IO_TYPE", "LVCMOS25") in self._differential_io_types and component == "n": - return True - return False - - def _get_xdr_buffer(self, m, pin, *, i_invert=False, o_invert=False): - def get_ireg(clk, d, q): - for bit in range(len(q)): - m.submodules += Instance("IFS1P3DX", - i_SCLK=clk, - i_SP=Const(1), - i_CD=Const(0), - i_D=d[bit], - o_Q=q[bit] - ) - - def get_oreg(clk, d, q): - for bit in range(len(q)): - m.submodules += Instance("OFS1P3DX", - i_SCLK=clk, - i_SP=Const(1), - i_CD=Const(0), - i_D=d[bit], - o_Q=q[bit] - ) - - def get_iddr(sclk, d, q0, q1): - for bit in range(len(d)): - m.submodules += Instance("IDDRXE", - i_SCLK=sclk, - i_RST=Const(0), - i_D=d[bit], - o_Q0=q0[bit], o_Q1=q1[bit] - ) - - def get_oddr(sclk, d0, d1, q): - for bit in range(len(q)): - m.submodules += Instance("ODDRXE", - i_SCLK=sclk, - i_RST=Const(0), - i_D0=d0[bit], i_D1=d1[bit], - o_Q=q[bit] - ) - - def get_ineg(z, invert): - if invert: - a = Signal.like(z, name_suffix="_n") - m.d.comb += z.eq(~a) - return 
a - else: - return z - - def get_oneg(a, invert): - if invert: - z = Signal.like(a, name_suffix="_n") - m.d.comb += z.eq(~a) - return z - else: - return a - - if "i" in pin.dir: - if pin.xdr < 2: - pin_i = get_ineg(pin.i, i_invert) - elif pin.xdr == 2: - pin_i0 = get_ineg(pin.i0, i_invert) - pin_i1 = get_ineg(pin.i1, i_invert) - if "o" in pin.dir: - if pin.xdr < 2: - pin_o = get_oneg(pin.o, o_invert) - elif pin.xdr == 2: - pin_o0 = get_oneg(pin.o0, o_invert) - pin_o1 = get_oneg(pin.o1, o_invert) - - i = o = t = None - if "i" in pin.dir: - i = Signal(pin.width, name="{}_xdr_i".format(pin.name)) - if "o" in pin.dir: - o = Signal(pin.width, name="{}_xdr_o".format(pin.name)) - if pin.dir in ("oe", "io"): - t = Signal(1, name="{}_xdr_t".format(pin.name)) - - if pin.xdr == 0: - if "i" in pin.dir: - i = pin_i - if "o" in pin.dir: - o = pin_o - if pin.dir in ("oe", "io"): - t = ~pin.oe - elif pin.xdr == 1: - # Note that currently nextpnr will not pack an FF (*FS1P3DX) into the PIO. - if "i" in pin.dir: - get_ireg(pin.i_clk, i, pin_i) - if "o" in pin.dir: - get_oreg(pin.o_clk, pin_o, o) - if pin.dir in ("oe", "io"): - get_oreg(pin.o_clk, ~pin.oe, t) - elif pin.xdr == 2: - if "i" in pin.dir: - get_iddr(pin.i_clk, i, pin_i0, pin_i1) - if "o" in pin.dir: - get_oddr(pin.o_clk, pin_o0, pin_o1, o) - if pin.dir in ("oe", "io"): - # It looks like Diamond will not pack an OREG as a tristate register in a DDR PIO. - # It is not clear what is the recommended set of primitives for this task. - # Similarly, nextpnr will not pack anything as a tristate register in a DDR PIO. 
- get_oreg(pin.o_clk, ~pin.oe, t) - else: - assert False - - return (i, o, t) - - def get_input(self, pin, port, attrs, invert): - self._check_feature("single-ended input", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert) - for bit in range(len(port)): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("IB", - i_I=port.io[bit], - o_O=i[bit] - ) - return m - - def get_output(self, pin, port, attrs, invert): - self._check_feature("single-ended output", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert) - for bit in range(len(port)): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("OB", - i_I=o[bit], - o_O=port.io[bit] - ) - return m - - def get_tristate(self, pin, port, attrs, invert): - self._check_feature("single-ended tristate", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert) - for bit in range(len(port)): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBZ", - i_T=t, - i_I=o[bit], - o_O=port.io[bit] - ) - return m - - def get_input_output(self, pin, port, attrs, invert): - self._check_feature("single-ended input/output", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert, o_invert=invert) - for bit in range(len(port)): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("BB", - i_T=t, - i_I=o[bit], - o_O=i[bit], - io_B=port.io[bit] - ) - return m - - def get_diff_input(self, pin, port, attrs, invert): - self._check_feature("differential input", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("IB", - i_I=port.p[bit], - o_O=i[bit] - ) - return m - - def 
get_diff_output(self, pin, port, attrs, invert): - self._check_feature("differential output", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("OB", - i_I=o[bit], - o_O=port.p[bit], - ) - return m - - def get_diff_tristate(self, pin, port, attrs, invert): - self._check_feature("differential tristate", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBZ", - i_T=t, - i_I=o[bit], - o_O=port.p[bit], - ) - return m - - def get_diff_input_output(self, pin, port, attrs, invert): - self._check_feature("differential input/output", pin, attrs, - valid_xdrs=(0, 1, 2), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert, o_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("BB", - i_T=t, - i_I=o[bit], - o_O=i[bit], - io_B=port.p[bit], - ) - return m - - # CDC primitives are not currently specialized for MachXO2/MachXO3L. 
- - -LatticeMachXO2Platform = LatticeMachXO2Or3LPlatform -LatticeMachXO3LPlatform = LatticeMachXO2Or3LPlatform +import warnings +warnings.warn("instead of nmigen.vendor.lattice_machxo_2_3l, use amaranth.vendor.lattice_machxo_2_3l", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/vendor/quicklogic.py b/nmigen/vendor/quicklogic.py index 8554fff..d09ef96 100644 --- a/nmigen/vendor/quicklogic.py +++ b/nmigen/vendor/quicklogic.py @@ -1,185 +1,7 @@ -from abc import abstractproperty - -from ..hdl import * -from ..lib.cdc import ResetSynchronizer -from ..build import * +from amaranth.vendor.quicklogic import * +from amaranth.vendor.quicklogic import __all__ -__all__ = ["QuicklogicPlatform"] - - -class QuicklogicPlatform(TemplatedPlatform): - """ - Symbiflow toolchain - ------------------- - - Required tools: - * ``symbiflow_synth`` - * ``symbiflow_pack`` - * ``symbiflow_place`` - * ``symbiflow_route`` - * ``symbiflow_write_fasm`` - * ``symbiflow_write_bitstream`` - - The environment is populated by running the script specified in the environment variable - ``NMIGEN_ENV_QLSymbiflow``, if present. - - Available overrides: - * ``add_constraints``: inserts commands in XDC file. - """ - - device = abstractproperty() - package = abstractproperty() - - # Since the QuickLogic version of SymbiFlow toolchain is not upstreamed yet - # we should distinguish the QuickLogic version from mainline one. 
- # QuickLogic toolchain: https://github.com/QuickLogic-Corp/quicklogic-fpga-toolchain/releases - toolchain = "QLSymbiflow" - - required_tools = [ - "symbiflow_synth", - "symbiflow_pack", - "symbiflow_place", - "symbiflow_route", - "symbiflow_write_fasm", - "symbiflow_write_bitstream", - "symbiflow_write_openocd", - ] - file_templates = { - **TemplatedPlatform.build_script_templates, - "{{name}}.v": r""" - /* {{autogenerated}} */ - {{emit_verilog()}} - """, - "{{name}}.debug.v": r""" - /* {{autogenerated}} */ - {{emit_debug_verilog()}} - """, - "{{name}}.pcf": r""" - # {{autogenerated}} - {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} - set_io {{port_name}} {{pin_name}} - {% endfor %} - """, - "{{name}}.xdc": r""" - # {{autogenerated}} - {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} - {% for attr_name, attr_value in attrs.items() -%} - set_property {{attr_name}} {{attr_value}} [get_ports {{port_name|tcl_escape}} }] - {% endfor %} - {% endfor %} - {{get_override("add_constraints")|default("# (add_constraints placeholder)")}} - """, - "{{name}}.sdc": r""" - # {{autogenerated}} - {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} - {% if port_signal is not none -%} - create_clock -period {{100000000/frequency}} {{port_signal.name|ascii_escape}} - {% endif %} - {% endfor %} - """ - } - command_templates = [ - r""" - {{invoke_tool("symbiflow_synth")}} - -t {{name}} - -v {% for file in platform.iter_files(".v", ".sv", ".vhd", ".vhdl") -%} {{file}} {% endfor %} {{name}}.v - -d {{platform.device}} - -p {{name}}.pcf - -P {{platform.package}} - -x {{name}}.xdc - """, - r""" - {{invoke_tool("symbiflow_pack")}} - -e {{name}}.eblif - -d {{platform.device}} - -s {{name}}.sdc - """, - r""" - {{invoke_tool("symbiflow_place")}} - -e {{name}}.eblif - -d {{platform.device}} - -p {{name}}.pcf - -n {{name}}.net - -P {{platform.package}} - -s {{name}}.sdc - """, - r""" - 
{{invoke_tool("symbiflow_route")}} - -e {{name}}.eblif - -d {{platform.device}} - -s {{name}}.sdc - """, - r""" - {{invoke_tool("symbiflow_write_fasm")}} - -e {{name}}.eblif - -d {{platform.device}} - -s {{name}}.sdc - """, - r""" - {{invoke_tool("symbiflow_write_bitstream")}} - -f {{name}}.fasm - -d {{platform.device}} - -P {{platform.package}} - -b {{name}}.bit - """, - # This should be `invoke_tool("symbiflow_write_openocd")`, but isn't because of a bug in - # the QLSymbiflow v1.3.0 toolchain release. - r""" - python3 -m quicklogic_fasm.bitstream_to_openocd - {{name}}.bit - {{name}}.openocd - --osc-freq {{platform.osc_freq}} - --fpga-clk-divider {{platform.osc_div}} - """, - ] - - # Common logic - - @property - def default_clk_constraint(self): - if self.default_clk == "sys_clk0": - return Clock(self.osc_freq / self.osc_div) - return super().default_clk_constraint - - def add_clock_constraint(self, clock, frequency): - super().add_clock_constraint(clock, frequency) - clock.attrs["keep"] = "TRUE" - - def create_missing_domain(self, name): - if name == "sync" and self.default_clk is not None: - m = Module() - if self.default_clk == "sys_clk0": - if not hasattr(self, "osc_div"): - raise ValueError("OSC divider (osc_div) must be an integer between 2 " - "and 512") - if not isinstance(self.osc_div, int) or self.osc_div < 2 or self.osc_div > 512: - raise ValueError("OSC divider (osc_div) must be an integer between 2 " - "and 512, not {!r}" - .format(self.osc_div)) - if not hasattr(self, "osc_freq"): - raise ValueError("OSC frequency (osc_freq) must be an integer between 2100000 " - "and 80000000") - if not isinstance(self.osc_freq, int) or self.osc_freq < 2100000 or self.osc_freq > 80000000: - raise ValueError("OSC frequency (osc_freq) must be an integer between 2100000 " - "and 80000000, not {!r}" - .format(self.osc_freq)) - clk_i = Signal() - sys_clk0 = Signal() - m.submodules += Instance("qlal4s3b_cell_macro", - o_Sys_Clk0=sys_clk0) - m.submodules += 
Instance("gclkbuff", - o_A=sys_clk0, - o_Z=clk_i) - else: - clk_i = self.request(self.default_clk).i - - if self.default_rst is not None: - rst_i = self.request(self.default_rst).i - else: - rst_i = Const(0) - - m.domains += ClockDomain("sync") - m.d.comb += ClockSignal("sync").eq(clk_i) - m.submodules.reset_sync = ResetSynchronizer(rst_i, domain="sync") - return m +import warnings +warnings.warn("instead of nmigen.vendor.quicklogic, use amaranth.vendor.quicklogic", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/vendor/xilinx.py b/nmigen/vendor/xilinx.py index f332a93..d744d61 100644 --- a/nmigen/vendor/xilinx.py +++ b/nmigen/vendor/xilinx.py @@ -1,1060 +1,7 @@ -from abc import abstractproperty +from amaranth.vendor.xilinx import * +from amaranth.vendor.xilinx import __all__ -from ..hdl import * -from ..lib.cdc import ResetSynchronizer -from ..build import * - -__all__ = ["XilinxPlatform"] - - -class XilinxPlatform(TemplatedPlatform): - """ - Vivado toolchain - ---------------- - - Required tools: - * ``vivado`` - - The environment is populated by running the script specified in the environment variable - ``NMIGEN_ENV_Vivado``, if present. - - Available overrides: - * ``script_after_read``: inserts commands after ``read_xdc`` in Tcl script. - * ``script_after_synth``: inserts commands after ``synth_design`` in Tcl script. - * ``script_after_place``: inserts commands after ``place_design`` in Tcl script. - * ``script_after_route``: inserts commands after ``route_design`` in Tcl script. - * ``script_before_bitstream``: inserts commands before ``write_bitstream`` in Tcl script. - * ``script_after_bitstream``: inserts commands after ``write_bitstream`` in Tcl script. - * ``add_constraints``: inserts commands in XDC file. - * ``vivado_opts``: adds extra options for ``vivado``. - - Build products: - * ``{{name}}.log``: Vivado log. - * ``{{name}}_timing_synth.rpt``: Vivado report. - * ``{{name}}_utilization_hierarchical_synth.rpt``: Vivado report. 
- * ``{{name}}_utilization_synth.rpt``: Vivado report. - * ``{{name}}_utilization_hierarchical_place.rpt``: Vivado report. - * ``{{name}}_utilization_place.rpt``: Vivado report. - * ``{{name}}_io.rpt``: Vivado report. - * ``{{name}}_control_sets.rpt``: Vivado report. - * ``{{name}}_clock_utilization.rpt``: Vivado report. - * ``{{name}}_route_status.rpt``: Vivado report. - * ``{{name}}_drc.rpt``: Vivado report. - * ``{{name}}_methodology.rpt``: Vivado report. - * ``{{name}}_timing.rpt``: Vivado report. - * ``{{name}}_power.rpt``: Vivado report. - * ``{{name}}_route.dcp``: Vivado design checkpoint. - * ``{{name}}.bit``: binary bitstream with metadata. - * ``{{name}}.bin``: binary bitstream. - - ISE toolchain - ------------- - - Required tools: - * ``xst`` - * ``ngdbuild`` - * ``map`` - * ``par`` - * ``bitgen`` - - The environment is populated by running the script specified in the environment variable - ``NMIGEN_ENV_ISE``, if present. - - Available overrides: - * ``script_after_run``: inserts commands after ``run`` in XST script. - * ``add_constraints``: inserts commands in UCF file. - * ``xst_opts``: adds extra options for ``xst``. - * ``ngdbuild_opts``: adds extra options for ``ngdbuild``. - * ``map_opts``: adds extra options for ``map``. - * ``par_opts``: adds extra options for ``par``. - * ``bitgen_opts``: adds extra and overrides default options for ``bitgen``; - default options: ``-g Compress``. - - Build products: - * ``{{name}}.srp``: synthesis report. - * ``{{name}}.ngc``: synthesized RTL. - * ``{{name}}.bld``: NGDBuild log. - * ``{{name}}.ngd``: design database. - * ``{{name}}_map.map``: MAP log. - * ``{{name}}_map.mrp``: mapping report. - * ``{{name}}_map.ncd``: mapped netlist. - * ``{{name}}.pcf``: physical constraints. - * ``{{name}}_par.par``: PAR log. - * ``{{name}}_par_pad.txt``: I/O usage report. - * ``{{name}}_par.ncd``: place and routed netlist. - * ``{{name}}.drc``: DRC report. - * ``{{name}}.bgn``: BitGen log. 
- * ``{{name}}.bit``: binary bitstream with metadata. - * ``{{name}}.bin``: raw binary bitstream. - - Symbiflow toolchain - ------------------- - - Required tools: - * ``symbiflow_synth`` - * ``symbiflow_pack`` - * ``symbiflow_place`` - * ``symbiflow_route`` - * ``symbiflow_write_fasm`` - * ``symbiflow_write_bitstream`` - - The environment is populated by running the script specified in the environment variable - ``NMIGEN_ENV_Symbiflow``, if present. - - Available overrides: - * ``add_constraints``: inserts commands in XDC file. - """ - - toolchain = None # selected when creating platform - - device = abstractproperty() - package = abstractproperty() - speed = abstractproperty() - - @property - def _part(self): - if self.family in {"ultrascale", "ultrascaleplus"}: - return "{}-{}-{}".format(self.device, self.package, self.speed) - else: - return "{}{}-{}".format(self.device, self.package, self.speed) - - # Vivado templates - - _vivado_required_tools = ["vivado"] - _vivado_file_templates = { - **TemplatedPlatform.build_script_templates, - "build_{{name}}.sh": r""" - # {{autogenerated}} - set -e{{verbose("x")}} - if [ -z "$BASH" ] ; then exec /bin/bash "$0" "$@"; fi - [ -n "${{platform._toolchain_env_var}}" ] && . 
"${{platform._toolchain_env_var}}" - {{emit_commands("sh")}} - """, - "{{name}}.v": r""" - /* {{autogenerated}} */ - {{emit_verilog()}} - """, - "{{name}}.debug.v": r""" - /* {{autogenerated}} */ - {{emit_debug_verilog()}} - """, - "{{name}}.tcl": r""" - # {{autogenerated}} - create_project -force -name {{name}} -part {{platform._part}} - {% for file in platform.iter_files(".v", ".sv", ".vhd", ".vhdl") -%} - add_files {{file|tcl_escape}} - {% endfor %} - add_files {{name}}.v - read_xdc {{name}}.xdc - {% for file in platform.iter_files(".xdc") -%} - read_xdc {{file|tcl_escape}} - {% endfor %} - {{get_override("script_after_read")|default("# (script_after_read placeholder)")}} - synth_design -top {{name}} - foreach cell [get_cells -quiet -hier -filter {nmigen.vivado.false_path == "TRUE"}] { - set_false_path -to $cell - } - foreach cell [get_cells -quiet -hier -filter {nmigen.vivado.max_delay != ""}] { - set clock [get_clocks -of_objects \ - [all_fanin -flat -startpoints_only [get_pin $cell/D]]] - if {[llength $clock] != 0} { - set_max_delay -datapath_only -from $clock \ - -to [get_cells $cell] [get_property nmigen.vivado.max_delay $cell] - } - } - {{get_override("script_after_synth")|default("# (script_after_synth placeholder)")}} - report_timing_summary -file {{name}}_timing_synth.rpt - report_utilization -hierarchical -file {{name}}_utilization_hierarchical_synth.rpt - report_utilization -file {{name}}_utilization_synth.rpt - opt_design - place_design - {{get_override("script_after_place")|default("# (script_after_place placeholder)")}} - report_utilization -hierarchical -file {{name}}_utilization_hierarchical_place.rpt - report_utilization -file {{name}}_utilization_place.rpt - report_io -file {{name}}_io.rpt - report_control_sets -verbose -file {{name}}_control_sets.rpt - report_clock_utilization -file {{name}}_clock_utilization.rpt - route_design - {{get_override("script_after_route")|default("# (script_after_route placeholder)")}} - phys_opt_design - 
report_timing_summary -no_header -no_detailed_paths - write_checkpoint -force {{name}}_route.dcp - report_route_status -file {{name}}_route_status.rpt - report_drc -file {{name}}_drc.rpt - report_methodology -file {{name}}_methodology.rpt - report_timing_summary -datasheet -max_paths 10 -file {{name}}_timing.rpt - report_power -file {{name}}_power.rpt - {{get_override("script_before_bitstream")|default("# (script_before_bitstream placeholder)")}} - write_bitstream -force -bin_file {{name}}.bit - {{get_override("script_after_bitstream")|default("# (script_after_bitstream placeholder)")}} - quit - """, - "{{name}}.xdc": r""" - # {{autogenerated}} - {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} - set_property LOC {{pin_name}} [get_ports {{port_name|tcl_escape}}] - {% for attr_name, attr_value in attrs.items() -%} - set_property {{attr_name}} {{attr_value|tcl_escape}} [get_ports {{port_name|tcl_escape}}] - {% endfor %} - {% endfor %} - {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} - {% if port_signal is not none -%} - create_clock -name {{port_signal.name|ascii_escape}} -period {{1000000000/frequency}} [get_ports {{port_signal.name|tcl_escape}}] - {% else -%} - create_clock -name {{net_signal.name|ascii_escape}} -period {{1000000000/frequency}} [get_nets {{net_signal|hierarchy("/")|tcl_escape}}] - {% endif %} - {% endfor %} - {{get_override("add_constraints")|default("# (add_constraints placeholder)")}} - """ - } - _vivado_command_templates = [ - r""" - {{invoke_tool("vivado")}} - {{verbose("-verbose")}} - {{get_override("vivado_opts")|options}} - -mode batch - -log {{name}}.log - -source {{name}}.tcl - """ - ] - - # ISE toolchain - - _ise_required_tools = [ - "xst", - "ngdbuild", - "map", - "par", - "bitgen", - ] - _ise_file_templates = { - **TemplatedPlatform.build_script_templates, - "build_{{name}}.sh": r""" - # {{autogenerated}} - set -e{{verbose("x")}} - if [ -z "$BASH" ] ; then exec /bin/bash 
"$0" "$@"; fi - [ -n "${{platform._toolchain_env_var}}" ] && . "${{platform._toolchain_env_var}}" - {{emit_commands("sh")}} - """, - "{{name}}.v": r""" - /* {{autogenerated}} */ - {{emit_verilog()}} - """, - "{{name}}.debug.v": r""" - /* {{autogenerated}} */ - {{emit_debug_verilog()}} - """, - "{{name}}.prj": r""" - # {{autogenerated}} - {% for file in platform.iter_files(".vhd", ".vhdl") -%} - vhdl work {{file}} - {% endfor %} - {% for file in platform.iter_files(".v") -%} - verilog work {{file}} - {% endfor %} - verilog work {{name}}.v - """, - "{{name}}.xst": r""" - # {{autogenerated}} - run - -ifn {{name}}.prj - -ofn {{name}}.ngc - -top {{name}} - -use_new_parser yes - -p {{platform.device}}{{platform.package}}-{{platform.speed}} - {{get_override("script_after_run")|default("# (script_after_run placeholder)")}} - """, - "{{name}}.ucf": r""" - # {{autogenerated}} - {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} - {% set port_name = port_name|replace("[", "<")|replace("]", ">") -%} - NET "{{port_name}}" LOC={{pin_name}}; - {% for attr_name, attr_value in attrs.items() -%} - NET "{{port_name}}" {{attr_name}}={{attr_value}}; - {% endfor %} - {% endfor %} - {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} - NET "{{net_signal|hierarchy("/")}}" TNM_NET="PRD{{net_signal|hierarchy("/")}}"; - TIMESPEC "TS{{net_signal|hierarchy("__")}}"=PERIOD "PRD{{net_signal|hierarchy("/")}}" {{1000000000/frequency}} ns HIGH 50%; - {% endfor %} - {{get_override("add_constraints")|default("# (add_constraints placeholder)")}} - """ - } - _ise_command_templates = [ - r""" - {{invoke_tool("xst")}} - {{get_override("xst_opts")|options}} - -ifn {{name}}.xst - """, - r""" - {{invoke_tool("ngdbuild")}} - {{quiet("-quiet")}} - {{verbose("-verbose")}} - {{get_override("ngdbuild_opts")|options}} - -uc {{name}}.ucf - {{name}}.ngc - """, - r""" - {{invoke_tool("map")}} - {{verbose("-detail")}} - 
{{get_override("map_opts")|default([])|options}} - -w - -o {{name}}_map.ncd - {{name}}.ngd - {{name}}.pcf - """, - r""" - {{invoke_tool("par")}} - {{get_override("par_opts")|default([])|options}} - -w - {{name}}_map.ncd - {{name}}_par.ncd - {{name}}.pcf - """, - r""" - {{invoke_tool("bitgen")}} - {{get_override("bitgen_opts")|default(["-g Compress"])|options}} - -w - -g Binary:Yes - {{name}}_par.ncd - {{name}}.bit - """ - ] - - # Symbiflow templates - - _symbiflow_part_map = { - "xc7a35ticsg324-1L": "xc7a35tcsg324-1", # Arty-A7 - } - - _symbiflow_required_tools = [ - "symbiflow_synth", - "symbiflow_pack", - "symbiflow_place", - "symbiflow_route", - "symbiflow_write_fasm", - "symbiflow_write_bitstream" - ] - _symbiflow_file_templates = { - **TemplatedPlatform.build_script_templates, - "{{name}}.v": r""" - /* {{autogenerated}} */ - {{emit_verilog()}} - """, - "{{name}}.debug.v": r""" - /* {{autogenerated}} */ - {{emit_debug_verilog()}} - """, - "{{name}}.pcf": r""" - # {{autogenerated}} - {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} - set_io {{port_name}} {{pin_name}} - {% endfor %} - """, - "{{name}}.xdc": r""" - # {{autogenerated}} - {% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%} - {% for attr_name, attr_value in attrs.items() -%} - set_property {{attr_name}} {{attr_value}} [get_ports {{port_name|tcl_escape}} }] - {% endfor %} - {% endfor %} - {{get_override("add_constraints")|default("# (add_constraints placeholder)")}} - """, - "{{name}}.sdc": r""" - # {{autogenerated}} - {% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%} - {% if port_signal is none -%} - create_clock -period {{1000000000/frequency}} {{net_signal.name|ascii_escape}} - {% endif %} - {% endfor %} - """ - } - _symbiflow_command_templates = [ - r""" - {{invoke_tool("symbiflow_synth")}} - -t {{name}} - -v {% for file in platform.iter_files(".v", ".sv", ".vhd", ".vhdl") -%} {{file}} {% endfor %} {{name}}.v 
- -p {{platform._symbiflow_part_map.get(platform._part, platform._part)}} - -x {{name}}.xdc - """, - r""" - {{invoke_tool("symbiflow_pack")}} - -e {{name}}.eblif - -P {{platform._symbiflow_part_map.get(platform._part, platform._part)}} - -s {{name}}.sdc - """, - r""" - {{invoke_tool("symbiflow_place")}} - -e {{name}}.eblif - -p {{name}}.pcf - -n {{name}}.net - -P {{platform._symbiflow_part_map.get(platform._part, platform._part)}} - -s {{name}}.sdc - """, - r""" - {{invoke_tool("symbiflow_route")}} - -e {{name}}.eblif - -P {{platform._symbiflow_part_map.get(platform._part, platform._part)}} - -s {{name}}.sdc - """, - r""" - {{invoke_tool("symbiflow_write_fasm")}} - -e {{name}}.eblif - -P {{platform._symbiflow_part_map.get(platform._part, platform._part)}} - """, - r""" - {{invoke_tool("symbiflow_write_bitstream")}} - -f {{name}}.fasm - -p {{platform._symbiflow_part_map.get(platform._part, platform._part)}} - -b {{name}}.bit - """ - ] - - # Common logic - - def __init__(self, *, toolchain=None): - super().__init__() - - # Determine device family. - device = self.device.lower() - # Remove the prefix. - if device.startswith("xc"): - device = device[2:] - elif device.startswith("xa"): - device = device[2:] - elif device.startswith("xqr"): - device = device[3:] - elif device.startswith("xq"): - device = device[2:] - else: - raise ValueError("Device '{}' is not recognized".format(self.device)) - # Do actual name matching. 
- if device.startswith("2vp"): - self.family = "virtex2p" - elif device.startswith("2v"): - self.family = "virtex2" - elif device.startswith("3sd"): - self.family = "spartan3adsp" - elif device.startswith("3s"): - if device.endswith("a"): - self.family = "spartan3a" - elif device.endswith("e"): - self.family = "spartan3e" - else: - self.family = "spartan3" - elif device.startswith("4v"): - self.family = "virtex4" - elif device.startswith("5v"): - self.family = "virtex5" - elif device.startswith("6v"): - self.family = "virtex6" - elif device.startswith("6s"): - self.family = "spartan6" - elif device.startswith("7"): - self.family = "series7" - elif device.startswith(("vu", "ku")): - if device.endswith("p"): - self.family = "ultrascaleplus" - else: - self.family = "ultrascale" - elif device.startswith(("zu", "u", "k26")): - self.family = "ultrascaleplus" - elif device.startswith(("v", "2s")): - # Match last to avoid conflict with ultrascale. - # Yes, Spartan 2 is the same thing as Virtex. 
- if device.endswith("e"): - self.family = "virtexe" - else: - self.family = "virtex" - - - ISE_FAMILIES = { - "virtex", "virtexe", - "virtex2", "virtex2p", - "spartan3", "spartan3e", "spartan3a", "spartan3adsp", - "virtex4", - "virtex5", - "virtex6", - "spartan6", - } - if toolchain is None: - if self.family in ISE_FAMILIES: - toolchain = "ISE" - else: - toolchain = "Vivado" - - assert toolchain in ("Vivado", "ISE", "Symbiflow") - if toolchain == "Vivado": - if self.family in ISE_FAMILIES: - raise ValueError("Family '{}' is not supported by the Vivado toolchain, please use ISE instead".format(self.family)) - elif toolchain == "ISE": - if self.family not in ISE_FAMILIES and self.family != "series7": - raise ValueError("Family '{}' is not supported by the ISE toolchain, please use Vivado instead".format(self.family)) - elif toolchain == "Symbiflow": - if self.family != "series7": - raise ValueError("Family '{}' is not supported by the Symbiflow toolchain".format(self.family)) - self.toolchain = toolchain - - @property - def required_tools(self): - if self.toolchain == "Vivado": - return self._vivado_required_tools - if self.toolchain == "ISE": - return self._ise_required_tools - if self.toolchain == "Symbiflow": - return self._symbiflow_required_tools - assert False - - @property - def file_templates(self): - if self.toolchain == "Vivado": - return self._vivado_file_templates - if self.toolchain == "ISE": - return self._ise_file_templates - if self.toolchain == "Symbiflow": - return self._symbiflow_file_templates - assert False - - @property - def command_templates(self): - if self.toolchain == "Vivado": - return self._vivado_command_templates - if self.toolchain == "ISE": - return self._ise_command_templates - if self.toolchain == "Symbiflow": - return self._symbiflow_command_templates - assert False - - def create_missing_domain(self, name): - # Xilinx devices have a global write enable (GWE) signal that asserted during configuraiton - # and deasserted once it 
ends. Because it is an asynchronous signal (GWE is driven by logic - # syncronous to configuration clock, which is not used by most designs), even though it is - # a low-skew global network, its deassertion may violate a setup/hold constraint with - # relation to a user clock. The recommended solution is to use a BUFGCE driven by the EOS - # signal (if available). For details, see: - # * https://www.xilinx.com/support/answers/44174.html - # * https://www.xilinx.com/support/documentation/white_papers/wp272.pdf - - STARTUP_PRIMITIVE = { - "spartan6": "STARTUP_SPARTAN6", - "virtex4": "STARTUP_VIRTEX4", - "virtex5": "STARTUP_VIRTEX5", - "virtex6": "STARTUP_VIRTEX6", - "series7": "STARTUPE2", - "ultrascale": "STARTUPE3", - "ultrascaleplus": "STARTUPE3", - } - - if self.family not in STARTUP_PRIMITIVE or self.toolchain == "Symbiflow": - # Spartan 3 and before lacks a STARTUP primitive with EOS output; use a simple ResetSynchronizer - # in that case, as is the default. - # Symbiflow does not support the STARTUPE2 primitive. - return super().create_missing_domain(name) - - if name == "sync" and self.default_clk is not None: - clk_i = self.request(self.default_clk).i - if self.default_rst is not None: - rst_i = self.request(self.default_rst).i - - m = Module() - ready = Signal() - m.submodules += Instance(STARTUP_PRIMITIVE[self.family], o_EOS=ready) - m.domains += ClockDomain("sync", reset_less=self.default_rst is None) - if self.toolchain != "Vivado": - m.submodules += Instance("BUFGCE", i_CE=ready, i_I=clk_i, o_O=ClockSignal("sync")) - elif self.family == "series7": - # Actually use BUFGCTRL configured as BUFGCE, since using BUFGCE causes - # sim/synth mismatches with Vivado 2019.2, and the suggested workaround - # (SIM_DEVICE parameter) breaks Vivado 2017.4. 
- m.submodules += Instance("BUFGCTRL", - p_SIM_DEVICE="7SERIES", - i_I0=clk_i, i_S0=C(1, 1), i_CE0=ready, i_IGNORE0=C(0, 1), - i_I1=C(1, 1), i_S1=C(0, 1), i_CE1=C(0, 1), i_IGNORE1=C(1, 1), - o_O=ClockSignal("sync") - ) - else: - m.submodules += Instance("BUFGCE", - p_SIM_DEVICE="ULTRASCALE", - i_CE=ready, - i_I=clk_i, - o_O=ClockSignal("sync") - ) - if self.default_rst is not None: - m.submodules.reset_sync = ResetSynchronizer(rst_i, domain="sync") - return m - - def add_clock_constraint(self, clock, frequency): - super().add_clock_constraint(clock, frequency) - clock.attrs["keep"] = "TRUE" - - def _get_xdr_buffer(self, m, pin, iostd, *, i_invert=False, o_invert=False): - XFDDR_FAMILIES = { - "virtex2", - "virtex2p", - "spartan3", - } - XDDR2_FAMILIES = { - "spartan3e", - "spartan3a", - "spartan3adsp", - "spartan6", - } - XDDR_FAMILIES = { - "virtex4", - "virtex5", - "virtex6", - "series7", - } - XDDRE1_FAMILIES = { - "ultrascale", - "ultrascaleplus", - } - - def get_iob_dff(clk, d, q): - # SDR I/O is performed by packing a flip-flop into the pad IOB. 
- for bit in range(len(q)): - m.submodules += Instance("FDCE", - a_IOB="TRUE", - i_C=clk, - i_CE=Const(1), - i_CLR=Const(0), - i_D=d[bit], - o_Q=q[bit] - ) - - def get_dff(clk, d, q): - for bit in range(len(q)): - m.submodules += Instance("FDCE", - i_C=clk, - i_CE=Const(1), - i_CLR=Const(0), - i_D=d[bit], - o_Q=q[bit] - ) - - def get_ifddr(clk, io, q0, q1): - assert self.family in XFDDR_FAMILIES - for bit in range(len(q0)): - m.submodules += Instance("IFDDRCPE", - i_C0=clk, i_C1=~clk, - i_CE=Const(1), - i_CLR=Const(0), i_PRE=Const(0), - i_D=io[bit], - o_Q0=q0[bit], o_Q1=q1[bit] - ) - - def get_iddr2(clk, d, q0, q1, alignment): - assert self.family in XDDR2_FAMILIES - for bit in range(len(q0)): - m.submodules += Instance("IDDR2", - p_DDR_ALIGNMENT=alignment, - p_SRTYPE="ASYNC", - p_INIT_Q0=C(0, 1), p_INIT_Q1=C(0, 1), - i_C0=clk, i_C1=~clk, - i_CE=Const(1), - i_S=Const(0), i_R=Const(0), - i_D=d[bit], - o_Q0=q0[bit], o_Q1=q1[bit] - ) - - def get_iddr(clk, d, q1, q2): - assert self.family in XDDR_FAMILIES or self.family in XDDRE1_FAMILIES - for bit in range(len(q1)): - if self.family in XDDR_FAMILIES: - m.submodules += Instance("IDDR", - p_DDR_CLK_EDGE="SAME_EDGE_PIPELINED", - p_SRTYPE="ASYNC", - p_INIT_Q1=C(0, 1), p_INIT_Q2=C(0, 1), - i_C=clk, - i_CE=Const(1), - i_S=Const(0), i_R=Const(0), - i_D=d[bit], - o_Q1=q1[bit], o_Q2=q2[bit] - ) - else: - m.submodules += Instance("IDDRE1", - p_DDR_CLK_EDGE="SAME_EDGE_PIPELINED", - p_IS_C_INVERTED=C(0, 1), p_IS_CB_INVERTED=C(1, 1), - i_C=clk, i_CB=clk, - i_R=Const(0), - i_D=d[bit], - o_Q1=q1[bit], o_Q2=q2[bit] - ) - - def get_fddr(clk, d0, d1, q): - for bit in range(len(q)): - if self.family in XFDDR_FAMILIES: - m.submodules += Instance("FDDRCPE", - i_C0=clk, i_C1=~clk, - i_CE=Const(1), - i_PRE=Const(0), i_CLR=Const(0), - i_D0=d0[bit], i_D1=d1[bit], - o_Q=q[bit] - ) - else: - m.submodules += Instance("ODDR2", - p_DDR_ALIGNMENT="NONE", - p_SRTYPE="ASYNC", - p_INIT=C(0, 1), - i_C0=clk, i_C1=~clk, - i_CE=Const(1), - i_S=Const(0), 
i_R=Const(0), - i_D0=d0[bit], i_D1=d1[bit], - o_Q=q[bit] - ) - - def get_oddr(clk, d1, d2, q): - for bit in range(len(q)): - if self.family in XDDR2_FAMILIES: - m.submodules += Instance("ODDR2", - p_DDR_ALIGNMENT="C0", - p_SRTYPE="ASYNC", - p_INIT=C(0, 1), - i_C0=clk, i_C1=~clk, - i_CE=Const(1), - i_S=Const(0), i_R=Const(0), - i_D0=d1[bit], i_D1=d2[bit], - o_Q=q[bit] - ) - elif self.family in XDDR_FAMILIES: - m.submodules += Instance("ODDR", - p_DDR_CLK_EDGE="SAME_EDGE", - p_SRTYPE="ASYNC", - p_INIT=C(0, 1), - i_C=clk, - i_CE=Const(1), - i_S=Const(0), i_R=Const(0), - i_D1=d1[bit], i_D2=d2[bit], - o_Q=q[bit] - ) - elif self.family in XDDRE1_FAMILIES: - m.submodules += Instance("ODDRE1", - p_SRVAL=C(0, 1), - i_C=clk, - i_SR=Const(0), - i_D1=d1[bit], i_D2=d2[bit], - o_Q=q[bit] - ) - - def get_ineg(y, invert): - if invert: - a = Signal.like(y, name_suffix="_n") - m.d.comb += y.eq(~a) - return a - else: - return y - - def get_oneg(a, invert): - if invert: - y = Signal.like(a, name_suffix="_n") - m.d.comb += y.eq(~a) - return y - else: - return a - - if "i" in pin.dir: - if pin.xdr < 2: - pin_i = get_ineg(pin.i, i_invert) - elif pin.xdr == 2: - pin_i0 = get_ineg(pin.i0, i_invert) - pin_i1 = get_ineg(pin.i1, i_invert) - if "o" in pin.dir: - if pin.xdr < 2: - pin_o = get_oneg(pin.o, o_invert) - elif pin.xdr == 2: - pin_o0 = get_oneg(pin.o0, o_invert) - pin_o1 = get_oneg(pin.o1, o_invert) - - i = o = t = None - if "i" in pin.dir: - i = Signal(pin.width, name="{}_xdr_i".format(pin.name)) - if "o" in pin.dir: - o = Signal(pin.width, name="{}_xdr_o".format(pin.name)) - if pin.dir in ("oe", "io"): - t = Signal(1, name="{}_xdr_t".format(pin.name)) - - if pin.xdr == 0: - if "i" in pin.dir: - i = pin_i - if "o" in pin.dir: - o = pin_o - if pin.dir in ("oe", "io"): - t = ~pin.oe - elif pin.xdr == 1: - if "i" in pin.dir: - get_iob_dff(pin.i_clk, i, pin_i) - if "o" in pin.dir: - get_iob_dff(pin.o_clk, pin_o, o) - if pin.dir in ("oe", "io"): - get_iob_dff(pin.o_clk, ~pin.oe, t) - elif 
pin.xdr == 2: - # On Spartan 3E/3A, the situation with DDR registers is messy: while the hardware - # supports same-edge alignment, it does so by borrowing the resources of the other - # pin in the differential pair (if any). Since we cannot be sure if the other pin - # is actually unused (or if the pin is even part of a differential pair in the first - # place), we only use the hardware alignment feature in two cases: - # - # - differential inputs (since the other pin's input registers will be unused) - # - true differential outputs (since they use only one pin's output registers, - # as opposed to pseudo-differential outputs that use both) - TRUE_DIFF_S3EA = { - "LVDS_33", "LVDS_25", - "MINI_LVDS_33", "MINI_LVDS_25", - "RSDS_33", "RSDS_25", - "PPDS_33", "PPDS_25", - "TMDS_33", - } - DIFF_S3EA = TRUE_DIFF_S3EA | { - "DIFF_HSTL_I", - "DIFF_HSTL_III", - "DIFF_HSTL_I_18", - "DIFF_HSTL_II_18", - "DIFF_HSTL_III_18", - "DIFF_SSTL3_I", - "DIFF_SSTL3_II", - "DIFF_SSTL2_I", - "DIFF_SSTL2_II", - "DIFF_SSTL18_I", - "DIFF_SSTL18_II", - "BLVDS_25", - } - if "i" in pin.dir: - if self.family in XFDDR_FAMILIES: - # First-generation input DDR register: basically just two FFs with opposite - # clocks. Add a register on both outputs, so that they enter fabric on - # the same clock edge, adding one cycle of latency. - i0_ff = Signal.like(pin_i0, name_suffix="_ff") - i1_ff = Signal.like(pin_i1, name_suffix="_ff") - get_dff(pin.i_clk, i0_ff, pin_i0) - get_dff(pin.i_clk, i1_ff, pin_i1) - get_iob_dff(pin.i_clk, i, i0_ff) - get_iob_dff(~pin.i_clk, i, i1_ff) - elif self.family in XDDR2_FAMILIES: - if self.family == 'spartan6' or iostd in DIFF_S3EA: - # Second-generation input DDR register: hw realigns i1 to positive clock edge, - # but also misaligns it with i0 input. Re-register first input before it - # enters fabric. This allows both inputs to enter fabric on the same clock - # edge, and adds one cycle of latency. 
- i0_ff = Signal.like(pin_i0, name_suffix="_ff") - get_dff(pin.i_clk, i0_ff, pin_i0) - get_iddr2(pin.i_clk, i, i0_ff, pin_i1, "C0") - else: - # No extra register available for hw alignment, use extra registers. - i0_ff = Signal.like(pin_i0, name_suffix="_ff") - i1_ff = Signal.like(pin_i1, name_suffix="_ff") - get_dff(pin.i_clk, i0_ff, pin_i0) - get_dff(pin.i_clk, i1_ff, pin_i1) - get_iddr2(pin.i_clk, i, i0_ff, i1_ff, "NONE") - else: - # Third-generation input DDR register: does all of the above on its own. - get_iddr(pin.i_clk, i, pin_i0, pin_i1) - if "o" in pin.dir: - if self.family in XFDDR_FAMILIES or self.family == "spartan3e" or (self.family.startswith("spartan3a") and iostd not in TRUE_DIFF_S3EA): - # For this generation, we need to realign o1 input ourselves. - o1_ff = Signal.like(pin_o1, name_suffix="_ff") - get_dff(pin.o_clk, pin_o1, o1_ff) - get_fddr(pin.o_clk, pin_o0, o1_ff, o) - else: - get_oddr(pin.o_clk, pin_o0, pin_o1, o) - if pin.dir in ("oe", "io"): - if self.family == "spartan6": - get_oddr(pin.o_clk, ~pin.oe, ~pin.oe, t) - else: - get_iob_dff(pin.o_clk, ~pin.oe, t) - else: - assert False - - return (i, o, t) - - def _get_valid_xdrs(self): - if self.family in {"virtex", "virtexe"}: - return (0, 1) - else: - return (0, 1, 2) - - def get_input(self, pin, port, attrs, invert): - self._check_feature("single-ended input", pin, attrs, - valid_xdrs=self._get_valid_xdrs(), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, attrs.get("IOSTANDARD"), i_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("IBUF", - i_I=port.io[bit], - o_O=i[bit] - ) - return m - - def get_output(self, pin, port, attrs, invert): - self._check_feature("single-ended output", pin, attrs, - valid_xdrs=self._get_valid_xdrs(), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, attrs.get("IOSTANDARD"), o_invert=invert) - if self.toolchain != "Symbiflow": - for bit in range(pin.width): - 
m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBUF", - i_I=o[bit], - o_O=port.io[bit] - ) - else: - m.d.comb += port.eq(self._invert_if(invert, o)) - return m - - def get_tristate(self, pin, port, attrs, invert): - if self.toolchain == "Symbiflow": - return super().get_tristate(pin, port, attrs, invert) - - self._check_feature("single-ended tristate", pin, attrs, - valid_xdrs=self._get_valid_xdrs(), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, attrs.get("IOSTANDARD"), o_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBUFT", - i_T=t, - i_I=o[bit], - o_O=port.io[bit] - ) - return m - - def get_input_output(self, pin, port, attrs, invert): - if self.toolchain == "Symbiflow": - return super().get_input_output(pin, port, attrs, invert) - - self._check_feature("single-ended input/output", pin, attrs, - valid_xdrs=self._get_valid_xdrs(), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, attrs.get("IOSTANDARD"), i_invert=invert, o_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("IOBUF", - i_T=t, - i_I=o[bit], - o_O=i[bit], - io_IO=port.io[bit] - ) - return m - - def get_diff_input(self, pin, port, attrs, invert): - if self.toolchain == "Symbiflow": - return super().get_diff_input(pin, port, attrs, invert) - - self._check_feature("differential input", pin, attrs, - valid_xdrs=self._get_valid_xdrs(), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, attrs.get("IOSTANDARD", "LVDS_25"), i_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("IBUFDS", - i_I=port.p[bit], i_IB=port.n[bit], - o_O=i[bit] - ) - return m - - def get_diff_output(self, pin, port, attrs, invert): - if self.toolchain == "Symbiflow": - return super().get_diff_output(pin, port, attrs, invert) - - self._check_feature("differential output", pin, attrs, - 
valid_xdrs=self._get_valid_xdrs(), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, attrs.get("IOSTANDARD", "LVDS_25"), o_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBUFDS", - i_I=o[bit], - o_O=port.p[bit], o_OB=port.n[bit] - ) - return m - - def get_diff_tristate(self, pin, port, attrs, invert): - if self.toolchain == "Symbiflow": - return super().get_diff_tristate(pin, port, attrs, invert) - - self._check_feature("differential tristate", pin, attrs, - valid_xdrs=self._get_valid_xdrs(), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, attrs.get("IOSTANDARD", "LVDS_25"), o_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBUFTDS", - i_T=t, - i_I=o[bit], - o_O=port.p[bit], o_OB=port.n[bit] - ) - return m - - def get_diff_input_output(self, pin, port, attrs, invert): - if self.toolchain == "Symbiflow": - return super().get_diff_input_output(pin, port, attrs, invert) - - self._check_feature("differential input/output", pin, attrs, - valid_xdrs=self._get_valid_xdrs(), valid_attrs=True) - m = Module() - i, o, t = self._get_xdr_buffer(m, pin, attrs.get("IOSTANDARD", "LVDS_25"), i_invert=invert, o_invert=invert) - for bit in range(pin.width): - m.submodules["{}_{}".format(pin.name, bit)] = Instance("IOBUFDS", - i_T=t, - i_I=o[bit], - o_O=i[bit], - io_IO=port.p[bit], io_IOB=port.n[bit] - ) - return m - - # The synchronizer implementations below apply two separate but related timing constraints. - # - # First, the ASYNC_REG attribute prevents inference of shift registers from synchronizer FFs, - # and constraints the FFs to be placed as close as possible, ideally in one CLB. This attribute - # only affects the synchronizer FFs themselves. - # - # Second, for Vivado only, the nmigen.vivado.false_path or nmigen.vivado.max_delay attribute - # affects the path into the synchronizer. 
If maximum input delay is specified, a datapath-only - # maximum delay constraint is applied, limiting routing delay (and therefore skew) at - # the synchronizer input. Otherwise, a false path constraint is used to omit the input path - # from the timing analysis. - - def get_ff_sync(self, ff_sync): - m = Module() - flops = [Signal(ff_sync.i.shape(), name="stage{}".format(index), - reset=ff_sync._reset, reset_less=ff_sync._reset_less, - attrs={"ASYNC_REG": "TRUE"}) - for index in range(ff_sync._stages)] - if self.toolchain == "Vivado": - if ff_sync._max_input_delay is None: - flops[0].attrs["nmigen.vivado.false_path"] = "TRUE" - else: - flops[0].attrs["nmigen.vivado.max_delay"] = str(ff_sync._max_input_delay * 1e9) - elif ff_sync._max_input_delay is not None: - raise NotImplementedError("Platform '{}' does not support constraining input delay " - "for FFSynchronizer" - .format(type(self).__name__)) - for i, o in zip((ff_sync.i, *flops), flops): - m.d[ff_sync._o_domain] += o.eq(i) - m.d.comb += ff_sync.o.eq(flops[-1]) - return m - - - def get_async_ff_sync(self, async_ff_sync): - m = Module() - m.domains += ClockDomain("async_ff", async_reset=True, local=True) - flops = [Signal(1, name="stage{}".format(index), reset=1, - attrs={"ASYNC_REG": "TRUE"}) - for index in range(async_ff_sync._stages)] - if self.toolchain == "Vivado": - if async_ff_sync._max_input_delay is None: - flops[0].attrs["nmigen.vivado.false_path"] = "TRUE" - else: - flops[0].attrs["nmigen.vivado.max_delay"] = str(async_ff_sync._max_input_delay * 1e9) - elif async_ff_sync._max_input_delay is not None: - raise NotImplementedError("Platform '{}' does not support constraining input delay " - "for AsyncFFSynchronizer" - .format(type(self).__name__)) - for i, o in zip((0, *flops), flops): - m.d.async_ff += o.eq(i) - - if async_ff_sync._edge == "pos": - m.d.comb += ResetSignal("async_ff").eq(async_ff_sync.i) - else: - m.d.comb += ResetSignal("async_ff").eq(~async_ff_sync.i) - - m.d.comb += [ - 
ClockSignal("async_ff").eq(ClockSignal(async_ff_sync._o_domain)), - async_ff_sync.o.eq(flops[-1]) - ] - - return m +import warnings +warnings.warn("instead of nmigen.vendor.xilinx, use amaranth.vendor.xilinx", + DeprecationWarning, stacklevel=2) diff --git a/nmigen/vendor/xilinx_7series.py b/nmigen/vendor/xilinx_7series.py index df1b5d8..3e92b30 100644 --- a/nmigen/vendor/xilinx_7series.py +++ b/nmigen/vendor/xilinx_7series.py @@ -1,15 +1,7 @@ +from amaranth.vendor.xilinx_7series import * +from amaranth.vendor.xilinx_7series import __all__ + + import warnings - -from .xilinx import XilinxPlatform - - -__all__ = ["Xilinx7SeriesPlatform"] - - -Xilinx7SeriesPlatform = XilinxPlatform - - -# TODO(nmigen-0.4): remove -warnings.warn("instead of nmigen.vendor.xilinx_7series.Xilinx7SeriesPlatform, " - "use nmigen.vendor.xilinx.XilinxPlatform", +warnings.warn("instead of nmigen.vendor.xilinx_7series, use amaranth.vendor.xilinx_7series", DeprecationWarning, stacklevel=2) diff --git a/nmigen/vendor/xilinx_spartan_3_6.py b/nmigen/vendor/xilinx_spartan_3_6.py index c55905f..c689e79 100644 --- a/nmigen/vendor/xilinx_spartan_3_6.py +++ b/nmigen/vendor/xilinx_spartan_3_6.py @@ -1,16 +1,7 @@ +from amaranth.vendor.xilinx_spartan_3_6 import * +from amaranth.vendor.xilinx_spartan_3_6 import __all__ + + import warnings - -from .xilinx import XilinxPlatform - - -__all__ = ["XilinxSpartan3APlatform", "XilinxSpartan6Platform"] - - -XilinxSpartan3APlatform = XilinxPlatform -XilinxSpartan6Platform = XilinxPlatform - - -# TODO(nmigen-0.4): remove -warnings.warn("instead of nmigen.vendor.xilinx_spartan_3_6.XilinxSpartan3APlatform and " - ".XilinxSpartan6Platform, use nmigen.vendor.xilinx.XilinxPlatform", +warnings.warn("instead of nmigen.vendor.xilinx_spartan_3_6, use amaranth.vendor.xilinx_spartan_3_6", DeprecationWarning, stacklevel=2) diff --git a/nmigen/vendor/xilinx_ultrascale.py b/nmigen/vendor/xilinx_ultrascale.py index ead8d93..d38221b 100644 --- a/nmigen/vendor/xilinx_ultrascale.py 
+++ b/nmigen/vendor/xilinx_ultrascale.py @@ -1,15 +1,7 @@ +from amaranth.vendor.xilinx_ultrascale import * +from amaranth.vendor.xilinx_ultrascale import __all__ + + import warnings - -from .xilinx import XilinxPlatform - - -__all__ = ["XilinxUltraScalePlatform"] - - -XilinxUltraScalePlatform = XilinxPlatform - - -# TODO(nmigen-0.4): remove -warnings.warn("instead of nmigen.vendor.xilinx_ultrascale.XilinxUltraScalePlatform, " - "use nmigen.vendor.xilinx.XilinxPlatform", +warnings.warn("instead of nmigen.vendor.xilinx_ultrascale, use amaranth.vendor.xilinx_ultrascale", DeprecationWarning, stacklevel=2) diff --git a/setup.py b/setup.py index 4cd96e7..8e2d690 100644 --- a/setup.py +++ b/setup.py @@ -30,35 +30,36 @@ def doc_version(): setup( - name="nmigen", + name="amaranth", use_scm_version=scm_version(), author="whitequark", author_email="whitequark@whitequark.org", - description="Python toolbox for building complex digital hardware", + description="Amaranth hardware definition language", #long_description="""TODO""", license="BSD", python_requires="~=3.6", setup_requires=["wheel", "setuptools", "setuptools_scm"], install_requires=[ - "importlib_metadata; python_version<'3.8'", # for __version__ and nmigen._yosys - "importlib_resources; python_version<'3.9'", # for nmigen._yosys - "pyvcd~=0.2.2", # for nmigen.pysim - "Jinja2~=2.11", # for nmigen.build + "importlib_metadata; python_version<'3.8'", # for __version__ and amaranth._toolchain.yosys + "importlib_resources; python_version<'3.9'", # for amaranth._toolchain.yosys + "pyvcd~=0.2.2", # for amaranth.pysim + "Jinja2~=2.11", # for amaranth.build ], extras_require={ - # this version requirement needs to be synchronized with the one in nmigen.back.verilog! - "builtin-yosys": ["nmigen-yosys>=0.9.post3527.*"], + # this version requirement needs to be synchronized with the one in amaranth.back.verilog! 
+ "builtin-yosys": ["amaranth-yosys>=0.9.post3527.*"], "remote-build": ["paramiko~=2.7"], }, - packages=find_packages(exclude=["tests*"]), + packages=find_packages(exclude=("tests", "tests.*")), entry_points={ "console_scripts": [ + "amaranth-rpc = amaranth.rpc:main", "nmigen-rpc = nmigen.rpc:main", ] }, project_urls={ - "Documentation": "https://nmigen.info/nmigen/{}".format(doc_version()), - "Source Code": "https://github.com/nmigen/nmigen", - "Bug Tracker": "https://github.com/nmigen/nmigen/issues", + "Documentation": "https://amaranth-lang.org/amaranth/{}".format(doc_version()), + "Source Code": "https://github.com/amaranth-lang/amaranth", + "Bug Tracker": "https://github.com/amaranth-lang/amaranth/issues", }, ) diff --git a/tests/compat/support.py b/tests/compat/support.py index 05923f8..1109fd0 100644 --- a/tests/compat/support.py +++ b/tests/compat/support.py @@ -1,6 +1,6 @@ -from nmigen.compat import * -from nmigen.compat.fhdl import verilog -from nmigen._utils import _ignore_deprecated +from amaranth.compat import * +from amaranth.compat.fhdl import verilog +from amaranth._utils import _ignore_deprecated class SimCase: diff --git a/tests/compat/test_coding.py b/tests/compat/test_coding.py index a65dea6..e72e65e 100644 --- a/tests/compat/test_coding.py +++ b/tests/compat/test_coding.py @@ -1,9 +1,9 @@ -# nmigen: UnusedElaboratable=no +# amaranth: UnusedElaboratable=no import unittest -from nmigen.compat import * -from nmigen.compat.genlib.coding import * +from amaranth.compat import * +from amaranth.compat.genlib.coding import * from .support import SimCase diff --git a/tests/compat/test_constant.py b/tests/compat/test_constant.py index 238c723..91d4119 100644 --- a/tests/compat/test_constant.py +++ b/tests/compat/test_constant.py @@ -1,6 +1,6 @@ import unittest -from nmigen.compat import * +from amaranth.compat import * from .support import SimCase diff --git a/tests/compat/test_fifo.py b/tests/compat/test_fifo.py index 6cff7dc..7efddc5 100644 --- 
a/tests/compat/test_fifo.py +++ b/tests/compat/test_fifo.py @@ -1,8 +1,8 @@ import unittest from itertools import count -from nmigen.compat import * -from nmigen.compat.genlib.fifo import SyncFIFO +from amaranth.compat import * +from amaranth.compat.genlib.fifo import SyncFIFO from .support import SimCase diff --git a/tests/compat/test_fsm.py b/tests/compat/test_fsm.py index 061dc3e..c165741 100644 --- a/tests/compat/test_fsm.py +++ b/tests/compat/test_fsm.py @@ -1,7 +1,7 @@ import unittest -from nmigen.compat import * -from nmigen.compat.genlib.fsm import FSM +from amaranth.compat import * +from amaranth.compat.genlib.fsm import FSM from .support import SimCase diff --git a/tests/compat/test_passive.py b/tests/compat/test_passive.py index c030f70..aced780 100644 --- a/tests/compat/test_passive.py +++ b/tests/compat/test_passive.py @@ -1,6 +1,6 @@ import unittest -from nmigen.compat import * +from amaranth.compat import * class PassiveCase(unittest.TestCase): diff --git a/tests/compat/test_run_simulation.py b/tests/compat/test_run_simulation.py index 0857e75..04e095d 100644 --- a/tests/compat/test_run_simulation.py +++ b/tests/compat/test_run_simulation.py @@ -1,12 +1,12 @@ import unittest -from nmigen import Signal, Module, Elaboratable +from amaranth import Signal, Module, Elaboratable from .support import SimCase class RunSimulation(SimCase, unittest.TestCase): - """ test for https://github.com/nmigen/nmigen/issues/344 """ + """ test for https://github.com/amaranth-lang/amaranth/issues/344 """ class TestBench(Elaboratable): def __init__(self): diff --git a/tests/compat/test_signed.py b/tests/compat/test_signed.py index f0a8c61..0b4bbd2 100644 --- a/tests/compat/test_signed.py +++ b/tests/compat/test_signed.py @@ -1,6 +1,6 @@ import unittest -from nmigen.compat import * +from amaranth.compat import * from .support import SimCase diff --git a/tests/compat/test_size.py b/tests/compat/test_size.py index 8211aa9..d4009b5 100644 --- a/tests/compat/test_size.py +++ 
b/tests/compat/test_size.py @@ -1,7 +1,7 @@ import unittest -from nmigen._utils import _ignore_deprecated -from nmigen.compat import * +from amaranth._utils import _ignore_deprecated +from amaranth.compat import * def _same_slices(a, b): diff --git a/tests/test_build_dsl.py b/tests/test_build_dsl.py index 580b962..9e6e29d 100644 --- a/tests/test_build_dsl.py +++ b/tests/test_build_dsl.py @@ -1,6 +1,6 @@ from collections import OrderedDict -from nmigen.build.dsl import * +from amaranth.build.dsl import * from .utils import * diff --git a/tests/test_build_plat.py b/tests/test_build_plat.py index 76a5331..0f9c016 100644 --- a/tests/test_build_plat.py +++ b/tests/test_build_plat.py @@ -1,5 +1,5 @@ -from nmigen import * -from nmigen.build.plat import * +from amaranth import * +from amaranth.build.plat import * from .utils import * diff --git a/tests/test_build_res.py b/tests/test_build_res.py index ac8f7bd..bc2f8e7 100644 --- a/tests/test_build_res.py +++ b/tests/test_build_res.py @@ -1,10 +1,10 @@ -# nmigen: UnusedElaboratable=no +# amaranth: UnusedElaboratable=no -from nmigen import * -from nmigen.hdl.rec import * -from nmigen.lib.io import * -from nmigen.build.dsl import * -from nmigen.build.res import * +from amaranth import * +from amaranth.hdl.rec import * +from amaranth.lib.io import * +from amaranth.build.dsl import * +from amaranth.build.res import * from .utils import * diff --git a/tests/test_compat.py b/tests/test_compat.py index aeb1617..f97db7c 100644 --- a/tests/test_compat.py +++ b/tests/test_compat.py @@ -1,5 +1,5 @@ -from nmigen.hdl.ir import Fragment -from nmigen.compat import * +from amaranth.hdl.ir import Fragment +from amaranth.compat import * from .utils import * diff --git a/tests/test_hdl_ast.py b/tests/test_hdl_ast.py index 90f3d26..267eb49 100644 --- a/tests/test_hdl_ast.py +++ b/tests/test_hdl_ast.py @@ -1,7 +1,7 @@ import warnings from enum import Enum -from nmigen.hdl.ast import * +from amaranth.hdl.ast import * from .utils import * @@ 
-152,7 +152,7 @@ class ValueTestCase(FHDLTestCase): c = Const(0) self.assertIs(Value.cast(c), c) with self.assertRaisesRegex(TypeError, - r"^Object 'str' cannot be converted to an nMigen value$"): + r"^Object 'str' cannot be converted to an Amaranth value$"): Value.cast("str") def test_cast_enum(self): @@ -170,7 +170,7 @@ class ValueTestCase(FHDLTestCase): def test_bool(self): with self.assertRaisesRegex(TypeError, - r"^Attempted to convert nMigen value to Python boolean$"): + r"^Attempted to convert Amaranth value to Python boolean$"): if Const(0): pass diff --git a/tests/test_hdl_cd.py b/tests/test_hdl_cd.py index 51cf845..7cb3508 100644 --- a/tests/test_hdl_cd.py +++ b/tests/test_hdl_cd.py @@ -1,4 +1,4 @@ -from nmigen.hdl.cd import * +from amaranth.hdl.cd import * from .utils import * diff --git a/tests/test_hdl_dsl.py b/tests/test_hdl_dsl.py index 5156eab..c9f116d 100644 --- a/tests/test_hdl_dsl.py +++ b/tests/test_hdl_dsl.py @@ -1,11 +1,11 @@ -# nmigen: UnusedElaboratable=no +# amaranth: UnusedElaboratable=no from collections import OrderedDict from enum import Enum -from nmigen.hdl.ast import * -from nmigen.hdl.cd import * -from nmigen.hdl.dsl import * +from amaranth.hdl.ast import * +from amaranth.hdl.cd import * +from amaranth.hdl.dsl import * from .utils import * diff --git a/tests/test_hdl_ir.py b/tests/test_hdl_ir.py index c578bbe..e774f5b 100644 --- a/tests/test_hdl_ir.py +++ b/tests/test_hdl_ir.py @@ -1,11 +1,11 @@ -# nmigen: UnusedElaboratable=no +# amaranth: UnusedElaboratable=no from collections import OrderedDict -from nmigen.hdl.ast import * -from nmigen.hdl.cd import * -from nmigen.hdl.ir import * -from nmigen.hdl.mem import * +from amaranth.hdl.ast import * +from amaranth.hdl.cd import * +from amaranth.hdl.ir import * +from amaranth.hdl.mem import * from .utils import * diff --git a/tests/test_hdl_mem.py b/tests/test_hdl_mem.py index ef0a885..ebb90dd 100644 --- a/tests/test_hdl_mem.py +++ b/tests/test_hdl_mem.py @@ -1,7 +1,7 @@ -# nmigen: 
UnusedElaboratable=no +# amaranth: UnusedElaboratable=no -from nmigen.hdl.ast import * -from nmigen.hdl.mem import * +from amaranth.hdl.ast import * +from amaranth.hdl.mem import * from .utils import * diff --git a/tests/test_hdl_rec.py b/tests/test_hdl_rec.py index 4b14cb6..6eb4d3c 100644 --- a/tests/test_hdl_rec.py +++ b/tests/test_hdl_rec.py @@ -1,7 +1,7 @@ from enum import Enum -from nmigen.hdl.ast import * -from nmigen.hdl.rec import * +from amaranth.hdl.ast import * +from amaranth.hdl.rec import * from .utils import * @@ -221,7 +221,7 @@ class RecordTestCase(FHDLTestCase): # __bool__ with self.assertRaisesRegex(TypeError, - r"^Attempted to convert nMigen value to Python boolean$"): + r"^Attempted to convert Amaranth value to Python boolean$"): not r1 # __invert__, __neg__ diff --git a/tests/test_hdl_xfrm.py b/tests/test_hdl_xfrm.py index cd1c0a3..ec3ebc1 100644 --- a/tests/test_hdl_xfrm.py +++ b/tests/test_hdl_xfrm.py @@ -1,12 +1,12 @@ -# nmigen: UnusedElaboratable=no +# amaranth: UnusedElaboratable=no import warnings -from nmigen.hdl.ast import * -from nmigen.hdl.cd import * -from nmigen.hdl.ir import * -from nmigen.hdl.xfrm import * -from nmigen.hdl.mem import * +from amaranth.hdl.ast import * +from amaranth.hdl.cd import * +from amaranth.hdl.ir import * +from amaranth.hdl.xfrm import * +from amaranth.hdl.mem import * from .utils import * diff --git a/tests/test_lib_cdc.py b/tests/test_lib_cdc.py index 8243636..0540be2 100644 --- a/tests/test_lib_cdc.py +++ b/tests/test_lib_cdc.py @@ -1,8 +1,8 @@ -# nmigen: UnusedElaboratable=no +# amaranth: UnusedElaboratable=no -from nmigen.hdl import * -from nmigen.sim import * -from nmigen.lib.cdc import * +from amaranth.hdl import * +from amaranth.sim import * +from amaranth.lib.cdc import * from .utils import * diff --git a/tests/test_lib_coding.py b/tests/test_lib_coding.py index f981a13..e2f875d 100644 --- a/tests/test_lib_coding.py +++ b/tests/test_lib_coding.py @@ -1,7 +1,7 @@ -from nmigen.hdl import * -from 
nmigen.asserts import * -from nmigen.sim import * -from nmigen.lib.coding import * +from amaranth.hdl import * +from amaranth.asserts import * +from amaranth.sim import * +from amaranth.lib.coding import * from .utils import * diff --git a/tests/test_lib_fifo.py b/tests/test_lib_fifo.py index 0e322c1..18515ce 100644 --- a/tests/test_lib_fifo.py +++ b/tests/test_lib_fifo.py @@ -1,9 +1,9 @@ -# nmigen: UnusedElaboratable=no +# amaranth: UnusedElaboratable=no -from nmigen.hdl import * -from nmigen.asserts import * -from nmigen.sim import * -from nmigen.lib.fifo import * +from amaranth.hdl import * +from amaranth.asserts import * +from amaranth.sim import * +from amaranth.lib.fifo import * from .utils import * diff --git a/tests/test_lib_io.py b/tests/test_lib_io.py index 5970650..5043060 100644 --- a/tests/test_lib_io.py +++ b/tests/test_lib_io.py @@ -1,7 +1,7 @@ -from nmigen.hdl import * -from nmigen.hdl.rec import * -from nmigen.sim import * -from nmigen.lib.io import * +from amaranth.hdl import * +from amaranth.hdl.rec import * +from amaranth.sim import * +from amaranth.lib.io import * from .utils import * diff --git a/tests/test_lib_scheduler.py b/tests/test_lib_scheduler.py index a3780ed..c979d1c 100644 --- a/tests/test_lib_scheduler.py +++ b/tests/test_lib_scheduler.py @@ -1,11 +1,11 @@ -# nmigen: UnusedElaboratable=no +# amaranth: UnusedElaboratable=no import unittest -from nmigen.hdl import * -from nmigen.asserts import * -from nmigen.sim import * -from nmigen.lib.scheduler import * +from amaranth.hdl import * +from amaranth.asserts import * +from amaranth.sim import * +from amaranth.lib.scheduler import * from .utils import * diff --git a/tests/test_sim.py b/tests/test_sim.py index e4bd5c8..98d63c0 100644 --- a/tests/test_sim.py +++ b/tests/test_sim.py @@ -1,14 +1,14 @@ import os from contextlib import contextmanager -from nmigen._utils import flatten -from nmigen.hdl.ast import * -from nmigen.hdl.cd import * -from nmigen.hdl.mem import * -from nmigen.hdl.rec 
import * -from nmigen.hdl.dsl import * -from nmigen.hdl.ir import * -from nmigen.sim import * +from amaranth._utils import flatten +from amaranth.hdl.ast import * +from amaranth.hdl.cd import * +from amaranth.hdl.mem import * +from amaranth.hdl.rec import * +from amaranth.hdl.dsl import * +from amaranth.hdl.ir import * +from amaranth.sim import * from .utils import * diff --git a/tests/test_toolchain_cxx.py b/tests/test_toolchain_cxx.py index 01021e2..48f674e 100644 --- a/tests/test_toolchain_cxx.py +++ b/tests/test_toolchain_cxx.py @@ -3,7 +3,7 @@ import ctypes import tempfile import unittest -from nmigen._toolchain.cxx import * +from amaranth._toolchain.cxx import * class ToolchainCxxTestCase(unittest.TestCase): @@ -51,7 +51,7 @@ class ToolchainCxxTestCase(unittest.TestCase): self.assertEqual(library.answer(), 42) def test_include(self): - self.include_dir = tempfile.TemporaryDirectory(prefix="nmigen_hxx_") + self.include_dir = tempfile.TemporaryDirectory(prefix="amaranth_hxx_") with open(os.path.join(self.include_dir.name, "answer.h"), "w") as f: f.write("#define ANSWER 42") diff --git a/tests/utils.py b/tests/utils.py index 6355001..383474b 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -6,10 +6,10 @@ import textwrap import traceback import unittest -from nmigen.hdl.ast import * -from nmigen.hdl.ir import * -from nmigen.back import rtlil -from nmigen._toolchain import require_tool +from amaranth.hdl.ast import * +from amaranth.hdl.ir import * +from amaranth.back import rtlil +from amaranth._toolchain import require_tool __all__ = ["FHDLTestCase"] @@ -46,7 +46,7 @@ class FHDLTestCase(unittest.TestCase): if mode == "hybrid": # A mix of BMC and k-induction, as per personal communication with Claire Wolf. - script = "setattr -unset init w:* a:nmigen.sample_reg %d" + script = "setattr -unset init w:* a:amaranth.sample_reg %d" mode = "bmc" else: script = ""