Rename nMigen to Amaranth HDL.
This commit is contained in:
parent
0b28a97ca0
commit
909a3b8be7
200 changed files with 14493 additions and 14451 deletions
|
|
@ -1,26 +1,7 @@
|
|||
try:
|
||||
try:
|
||||
from importlib import metadata as importlib_metadata # py3.8+ stdlib
|
||||
except ImportError:
|
||||
import importlib_metadata # py3.7- shim
|
||||
__version__ = importlib_metadata.version(__package__)
|
||||
except ImportError:
|
||||
# No importlib_metadata. This shouldn't normally happen, but some people prefer not installing
|
||||
# packages via pip at all, instead using PYTHONPATH directly or copying the package files into
|
||||
# `lib/pythonX.Y/site-packages`. Although not a recommended way, we still try to support it.
|
||||
__version__ = "unknown" # :nocov:
|
||||
from amaranth import *
|
||||
from amaranth import __all__
|
||||
|
||||
|
||||
from .hdl import *
|
||||
|
||||
|
||||
__all__ = [
|
||||
"Shape", "unsigned", "signed",
|
||||
"Value", "Const", "C", "Mux", "Cat", "Repl", "Array", "Signal", "ClockSignal", "ResetSignal",
|
||||
"Module",
|
||||
"ClockDomain",
|
||||
"Elaboratable", "Fragment", "Instance",
|
||||
"Memory",
|
||||
"Record",
|
||||
"DomainRenamer", "ResetInserter", "EnableInserter",
|
||||
]
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen, use amaranth",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,37 +0,0 @@
|
|||
import os
|
||||
import shutil
|
||||
|
||||
|
||||
__all__ = ["ToolNotFound", "tool_env_var", "has_tool", "require_tool"]
|
||||
|
||||
|
||||
class ToolNotFound(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def tool_env_var(name):
|
||||
return name.upper().replace("-", "_").replace("+", "X")
|
||||
|
||||
|
||||
def _get_tool(name):
|
||||
return os.environ.get(tool_env_var(name), name)
|
||||
|
||||
|
||||
def has_tool(name):
|
||||
return shutil.which(_get_tool(name)) is not None
|
||||
|
||||
|
||||
def require_tool(name):
|
||||
env_var = tool_env_var(name)
|
||||
path = _get_tool(name)
|
||||
if shutil.which(path) is None:
|
||||
if env_var in os.environ:
|
||||
raise ToolNotFound("Could not find required tool {} in {} as "
|
||||
"specified via the {} environment variable".
|
||||
format(name, path, env_var))
|
||||
else:
|
||||
raise ToolNotFound("Could not find required tool {} in PATH. Place "
|
||||
"it directly in PATH or specify path explicitly "
|
||||
"via the {} environment variable".
|
||||
format(name, env_var))
|
||||
return path
|
||||
|
|
@ -1,72 +0,0 @@
|
|||
import tempfile
|
||||
import sysconfig
|
||||
import warnings
|
||||
import os.path
|
||||
|
||||
|
||||
__all__ = ["build_cxx"]
|
||||
|
||||
|
||||
def build_cxx(*, cxx_sources, output_name, include_dirs, macros):
|
||||
build_dir = tempfile.TemporaryDirectory(prefix="nmigen_cxx_")
|
||||
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
# Unforuntately, `ccompiler.compile` assumes the paths are relative, and interprets
|
||||
# the directory name of the source path specially. That makes it necessary to build in
|
||||
# the output directory directly.
|
||||
os.chdir(build_dir.name)
|
||||
|
||||
with warnings.catch_warnings():
|
||||
warnings.filterwarnings(action="ignore", category=DeprecationWarning)
|
||||
# This emits a DeprecationWarning on Python 3.6 and 3.10.
|
||||
from setuptools import distutils
|
||||
cc_driver = distutils.ccompiler.new_compiler()
|
||||
|
||||
cc_driver.output_dir = "."
|
||||
|
||||
cc = sysconfig.get_config_var("CC")
|
||||
cxx = sysconfig.get_config_var("CXX")
|
||||
cflags = sysconfig.get_config_var("CCSHARED")
|
||||
ld_flags = sysconfig.get_config_var("LDSHARED")
|
||||
ld_cxxflags = sysconfig.get_config_var("LDCXXSHARED")
|
||||
if ld_cxxflags is None:
|
||||
# PyPy doesn't have LDCXXSHARED. Glue it together from CXX and LDSHARED and hope that
|
||||
# the result actually works; not many good options here.
|
||||
ld_cxxflags = " ".join([cxx.split()[0], *ld_flags.split()[1:]])
|
||||
cc_driver.set_executables(
|
||||
compiler=f"{cc} {cflags}",
|
||||
compiler_so=f"{cc} {cflags}",
|
||||
compiler_cxx=f"{cxx} {cflags}",
|
||||
linker_so=ld_cxxflags,
|
||||
)
|
||||
|
||||
# Sometimes CCompiler is modified to have additional executable entries for compiling and
|
||||
# linking CXX shared objects (e.g. on Gentoo). These executables have to be set then.
|
||||
try:
|
||||
cc_driver.set_executables(
|
||||
compiler_so_cxx=f"{cxx} {cflags}",
|
||||
linker_so_cxx=ld_cxxflags,
|
||||
)
|
||||
except:
|
||||
pass
|
||||
|
||||
for include_dir in include_dirs:
|
||||
cc_driver.add_include_dir(include_dir)
|
||||
for macro in macros:
|
||||
cc_driver.define_macro(macro)
|
||||
for cxx_filename, cxx_source in cxx_sources.items():
|
||||
with open(cxx_filename, "w") as f:
|
||||
f.write(cxx_source)
|
||||
|
||||
cxx_filenames = list(cxx_sources.keys())
|
||||
obj_filenames = cc_driver.object_filenames(cxx_filenames)
|
||||
so_filename = cc_driver.shared_object_filename(output_name)
|
||||
|
||||
cc_driver.compile(cxx_filenames)
|
||||
cc_driver.link_shared_object(obj_filenames, output_filename=so_filename, target_lang="c++")
|
||||
|
||||
return build_dir, so_filename
|
||||
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
|
|
@ -1,229 +0,0 @@
|
|||
import os
|
||||
import sys
|
||||
import re
|
||||
import subprocess
|
||||
import warnings
|
||||
import pathlib
|
||||
try:
|
||||
from importlib import metadata as importlib_metadata # py3.8+ stdlib
|
||||
except ImportError:
|
||||
try:
|
||||
import importlib_metadata # py3.7- shim
|
||||
except ImportError:
|
||||
importlib_metadata = None # not installed
|
||||
try:
|
||||
try:
|
||||
from importlib import resources as importlib_resources
|
||||
try:
|
||||
importlib_resources.files # py3.9+ stdlib
|
||||
except AttributeError:
|
||||
import importlib_resources # py3.8- shim
|
||||
except ImportError:
|
||||
import importlib_resources # py3.6- shim
|
||||
except ImportError:
|
||||
importlib_resources = None
|
||||
|
||||
from . import has_tool, require_tool
|
||||
|
||||
|
||||
__all__ = ["YosysError", "YosysBinary", "find_yosys"]
|
||||
|
||||
|
||||
class YosysError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class YosysWarning(Warning):
|
||||
pass
|
||||
|
||||
|
||||
class YosysBinary:
|
||||
@classmethod
|
||||
def available(cls):
|
||||
"""Check for Yosys availability.
|
||||
|
||||
Returns
|
||||
-------
|
||||
available : bool
|
||||
``True`` if Yosys is installed, ``False`` otherwise. Installed binary may still not
|
||||
be runnable, or might be too old to be useful.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@classmethod
|
||||
def version(cls):
|
||||
"""Get Yosys version.
|
||||
|
||||
Returns
|
||||
-------
|
||||
``None`` if version number could not be determined, or a 3-tuple ``(major, minor, distance)`` if it could.
|
||||
|
||||
major : int
|
||||
Major version.
|
||||
minor : int
|
||||
Minor version.
|
||||
distance : int
|
||||
Distance to last tag per ``git describe``. May not be exact for system Yosys.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@classmethod
|
||||
def data_dir(cls):
|
||||
"""Get Yosys data directory.
|
||||
|
||||
Returns
|
||||
-------
|
||||
data_dir : pathlib.Path
|
||||
Yosys data directory (also known as "datdir").
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@classmethod
|
||||
def run(cls, args, stdin=""):
|
||||
"""Run Yosys process.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
args : list of str
|
||||
Arguments, not including the program name.
|
||||
stdin : str
|
||||
Standard input.
|
||||
|
||||
Returns
|
||||
-------
|
||||
stdout : str
|
||||
Standard output.
|
||||
|
||||
Exceptions
|
||||
----------
|
||||
YosysError
|
||||
Raised if Yosys returns a non-zero code. The exception message is the standard error
|
||||
output.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@classmethod
|
||||
def _process_result(cls, returncode, stdout, stderr, ignore_warnings, src_loc_at):
|
||||
if returncode:
|
||||
raise YosysError(stderr.strip())
|
||||
if not ignore_warnings:
|
||||
for match in re.finditer(r"(?ms:^Warning: (.+)\n$)", stderr):
|
||||
message = match.group(1).replace("\n", " ")
|
||||
warnings.warn(message, YosysWarning, stacklevel=3 + src_loc_at)
|
||||
return stdout
|
||||
|
||||
|
||||
class _BuiltinYosys(YosysBinary):
|
||||
YOSYS_PACKAGE = "nmigen_yosys"
|
||||
|
||||
@classmethod
|
||||
def available(cls):
|
||||
if importlib_metadata is None or importlib_resources is None:
|
||||
return False
|
||||
try:
|
||||
importlib_metadata.version(cls.YOSYS_PACKAGE)
|
||||
return True
|
||||
except importlib_metadata.PackageNotFoundError:
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def version(cls):
|
||||
version = importlib_metadata.version(cls.YOSYS_PACKAGE)
|
||||
match = re.match(r"^(\d+)\.(\d+)(?:\.post(\d+))?", version)
|
||||
return (int(match[1]), int(match[2]), int(match[3] or 0))
|
||||
|
||||
@classmethod
|
||||
def data_dir(cls):
|
||||
return importlib_resources.files(cls.YOSYS_PACKAGE) / "share"
|
||||
|
||||
@classmethod
|
||||
def run(cls, args, stdin="", *, ignore_warnings=False, src_loc_at=0):
|
||||
popen = subprocess.Popen([sys.executable, "-m", cls.YOSYS_PACKAGE, *args],
|
||||
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
||||
encoding="utf-8")
|
||||
stdout, stderr = popen.communicate(stdin)
|
||||
return cls._process_result(popen.returncode, stdout, stderr, ignore_warnings, src_loc_at)
|
||||
|
||||
|
||||
class _SystemYosys(YosysBinary):
|
||||
YOSYS_BINARY = "yosys"
|
||||
|
||||
@classmethod
|
||||
def available(cls):
|
||||
return has_tool(cls.YOSYS_BINARY)
|
||||
|
||||
@classmethod
|
||||
def version(cls):
|
||||
version = cls.run(["-V"])
|
||||
match = re.match(r"^Yosys (\d+)\.(\d+)(?:\+(\d+))?", version)
|
||||
if match:
|
||||
return (int(match[1]), int(match[2]), int(match[3] or 0))
|
||||
else:
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def data_dir(cls):
|
||||
popen = subprocess.Popen([require_tool(cls.YOSYS_BINARY) + "-config", "--datdir"],
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
||||
encoding="utf-8")
|
||||
stdout, stderr = popen.communicate()
|
||||
if popen.returncode:
|
||||
raise YosysError(stderr.strip())
|
||||
return pathlib.Path(stdout.strip())
|
||||
|
||||
@classmethod
|
||||
def run(cls, args, stdin="", *, ignore_warnings=False, src_loc_at=0):
|
||||
popen = subprocess.Popen([require_tool(cls.YOSYS_BINARY), *args],
|
||||
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
||||
encoding="utf-8")
|
||||
stdout, stderr = popen.communicate(stdin)
|
||||
# If Yosys is built with an evaluation version of Verific, then Verific license
|
||||
# information is printed first. It consists of empty lines and lines starting with `--`,
|
||||
# which are not normally a part of Yosys output, and can be fairly safely removed.
|
||||
#
|
||||
# This is not ideal, but Verific license conditions rule out any other solution.
|
||||
stdout = re.sub(r"\A(-- .+\n|\n)*", "", stdout)
|
||||
return cls._process_result(popen.returncode, stdout, stderr, ignore_warnings, src_loc_at)
|
||||
|
||||
|
||||
def find_yosys(requirement):
|
||||
"""Find an available Yosys executable of required version.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
requirement : function
|
||||
Version check. Should return ``True`` if the version is acceptable, ``False`` otherwise.
|
||||
|
||||
Returns
|
||||
-------
|
||||
yosys_binary : subclass of YosysBinary
|
||||
Proxy for running the requested version of Yosys.
|
||||
|
||||
Exceptions
|
||||
----------
|
||||
YosysError
|
||||
Raised if required Yosys version is not found.
|
||||
"""
|
||||
proxies = []
|
||||
clauses = os.environ.get("NMIGEN_USE_YOSYS", "system,builtin").split(",")
|
||||
for clause in clauses:
|
||||
if clause == "builtin":
|
||||
proxies.append(_BuiltinYosys)
|
||||
elif clause == "system":
|
||||
proxies.append(_SystemYosys)
|
||||
else:
|
||||
raise YosysError("The NMIGEN_USE_YOSYS environment variable contains "
|
||||
"an unrecognized clause {!r}"
|
||||
.format(clause))
|
||||
for proxy in proxies:
|
||||
if proxy.available():
|
||||
version = proxy.version()
|
||||
if version is not None and requirement(version):
|
||||
return proxy
|
||||
else:
|
||||
if "NMIGEN_USE_YOSYS" in os.environ:
|
||||
raise YosysError("Could not find an acceptable Yosys binary. Searched: {}"
|
||||
.format(", ".join(clauses)))
|
||||
else:
|
||||
raise YosysError("Could not find an acceptable Yosys binary. The `nmigen-yosys` PyPI "
|
||||
"package, if available for this platform, can be used as fallback")
|
||||
|
|
@ -1,45 +0,0 @@
|
|||
import sys
|
||||
import warnings
|
||||
|
||||
from ._utils import get_linter_option
|
||||
|
||||
|
||||
__all__ = ["UnusedMustUse", "MustUse"]
|
||||
|
||||
|
||||
class UnusedMustUse(Warning):
|
||||
pass
|
||||
|
||||
|
||||
class MustUse:
|
||||
_MustUse__silence = False
|
||||
_MustUse__warning = UnusedMustUse
|
||||
|
||||
def __new__(cls, *args, src_loc_at=0, **kwargs):
|
||||
frame = sys._getframe(1 + src_loc_at)
|
||||
self = super().__new__(cls)
|
||||
self._MustUse__used = False
|
||||
self._MustUse__context = dict(
|
||||
filename=frame.f_code.co_filename,
|
||||
lineno=frame.f_lineno,
|
||||
source=self)
|
||||
return self
|
||||
|
||||
def __del__(self):
|
||||
if self._MustUse__silence:
|
||||
return
|
||||
if hasattr(self, "_MustUse__used") and not self._MustUse__used:
|
||||
if get_linter_option(self._MustUse__context["filename"],
|
||||
self._MustUse__warning.__name__, bool, True):
|
||||
warnings.warn_explicit(
|
||||
"{!r} created but never used".format(self), self._MustUse__warning,
|
||||
**self._MustUse__context)
|
||||
|
||||
|
||||
_old_excepthook = sys.excepthook
|
||||
def _silence_elaboratable(type, value, traceback):
|
||||
# Don't show anything if the interpreter crashed; that'd just obscure the exception
|
||||
# traceback instead of helping.
|
||||
MustUse._MustUse__silence = True
|
||||
_old_excepthook(type, value, traceback)
|
||||
sys.excepthook = _silence_elaboratable
|
||||
115
nmigen/_utils.py
115
nmigen/_utils.py
|
|
@ -1,115 +0,0 @@
|
|||
import contextlib
|
||||
import functools
|
||||
import warnings
|
||||
import linecache
|
||||
import re
|
||||
from collections import OrderedDict
|
||||
from collections.abc import Iterable
|
||||
|
||||
from .utils import *
|
||||
|
||||
|
||||
__all__ = ["flatten", "union" , "log2_int", "bits_for", "memoize", "final", "deprecated",
|
||||
"get_linter_options", "get_linter_option"]
|
||||
|
||||
|
||||
def flatten(i):
|
||||
for e in i:
|
||||
if isinstance(e, Iterable):
|
||||
yield from flatten(e)
|
||||
else:
|
||||
yield e
|
||||
|
||||
|
||||
def union(i, start=None):
|
||||
r = start
|
||||
for e in i:
|
||||
if r is None:
|
||||
r = e
|
||||
else:
|
||||
r |= e
|
||||
return r
|
||||
|
||||
|
||||
def memoize(f):
|
||||
memo = OrderedDict()
|
||||
@functools.wraps(f)
|
||||
def g(*args):
|
||||
if args not in memo:
|
||||
memo[args] = f(*args)
|
||||
return memo[args]
|
||||
return g
|
||||
|
||||
|
||||
def final(cls):
|
||||
def init_subclass():
|
||||
raise TypeError("Subclassing {}.{} is not supported"
|
||||
.format(cls.__module__, cls.__name__))
|
||||
cls.__init_subclass__ = init_subclass
|
||||
return cls
|
||||
|
||||
|
||||
def deprecated(message, stacklevel=2):
|
||||
def decorator(f):
|
||||
@functools.wraps(f)
|
||||
def wrapper(*args, **kwargs):
|
||||
warnings.warn(message, DeprecationWarning, stacklevel=stacklevel)
|
||||
return f(*args, **kwargs)
|
||||
return wrapper
|
||||
return decorator
|
||||
|
||||
|
||||
def _ignore_deprecated(f=None):
|
||||
if f is None:
|
||||
@contextlib.contextmanager
|
||||
def context_like():
|
||||
with warnings.catch_warnings():
|
||||
warnings.filterwarnings(action="ignore", category=DeprecationWarning)
|
||||
yield
|
||||
return context_like()
|
||||
else:
|
||||
@functools.wraps(f)
|
||||
def decorator_like(*args, **kwargs):
|
||||
with warnings.catch_warnings():
|
||||
warnings.filterwarnings(action="ignore", category=DeprecationWarning)
|
||||
f(*args, **kwargs)
|
||||
return decorator_like
|
||||
|
||||
|
||||
def extend(cls):
|
||||
def decorator(f):
|
||||
if isinstance(f, property):
|
||||
name = f.fget.__name__
|
||||
else:
|
||||
name = f.__name__
|
||||
setattr(cls, name, f)
|
||||
return decorator
|
||||
|
||||
|
||||
def get_linter_options(filename):
|
||||
first_line = linecache.getline(filename, 1)
|
||||
if first_line:
|
||||
match = re.match(r"^#\s*nmigen:\s*((?:\w+=\w+\s*)(?:,\s*\w+=\w+\s*)*)\n$", first_line)
|
||||
if match:
|
||||
return dict(map(lambda s: s.strip().split("=", 2), match.group(1).split(",")))
|
||||
return dict()
|
||||
|
||||
|
||||
def get_linter_option(filename, name, type, default):
|
||||
options = get_linter_options(filename)
|
||||
if name not in options:
|
||||
return default
|
||||
|
||||
option = options[name]
|
||||
if type is bool:
|
||||
if option in ("1", "yes", "enable"):
|
||||
return True
|
||||
if option in ("0", "no", "disable"):
|
||||
return False
|
||||
return default
|
||||
if type is int:
|
||||
try:
|
||||
return int(option, 0)
|
||||
except ValueError:
|
||||
return default
|
||||
assert False
|
||||
|
|
@ -1,2 +1,6 @@
|
|||
from .hdl.ast import AnyConst, AnySeq, Assert, Assume, Cover
|
||||
from .hdl.ast import Past, Stable, Rose, Fell, Initial
|
||||
from amaranth.asserts import *
|
||||
|
||||
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.asserts, use amaranth.asserts",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,6 @@
|
|||
from amaranth.back import *
|
||||
|
||||
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.back, use amaranth.back",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
@ -1,41 +1,7 @@
|
|||
from .._toolchain.yosys import *
|
||||
from . import rtlil
|
||||
from amaranth.back.cxxrtl import *
|
||||
from amaranth.back.cxxrtl import __all__
|
||||
|
||||
|
||||
__all__ = ["YosysError", "convert", "convert_fragment"]
|
||||
|
||||
|
||||
def _convert_rtlil_text(rtlil_text, black_boxes, *, src_loc_at=0):
|
||||
if black_boxes is not None:
|
||||
if not isinstance(black_boxes, dict):
|
||||
raise TypeError("CXXRTL black boxes must be a dictionary, not {!r}"
|
||||
.format(black_boxes))
|
||||
for box_name, box_source in black_boxes.items():
|
||||
if not isinstance(box_name, str):
|
||||
raise TypeError("CXXRTL black box name must be a string, not {!r}"
|
||||
.format(box_name))
|
||||
if not isinstance(box_source, str):
|
||||
raise TypeError("CXXRTL black box source code must be a string, not {!r}"
|
||||
.format(box_source))
|
||||
|
||||
yosys = find_yosys(lambda ver: ver >= (0, 9, 3468))
|
||||
|
||||
script = []
|
||||
if black_boxes is not None:
|
||||
for box_name, box_source in black_boxes.items():
|
||||
script.append("read_ilang <<rtlil\n{}\nrtlil".format(box_source))
|
||||
script.append("read_ilang <<rtlil\n{}\nrtlil".format(rtlil_text))
|
||||
script.append("delete w:$verilog_initial_trigger")
|
||||
script.append("write_cxxrtl")
|
||||
|
||||
return yosys.run(["-q", "-"], "\n".join(script), src_loc_at=1 + src_loc_at)
|
||||
|
||||
|
||||
def convert_fragment(*args, black_boxes=None, **kwargs):
|
||||
rtlil_text, name_map = rtlil.convert_fragment(*args, **kwargs)
|
||||
return _convert_rtlil_text(rtlil_text, black_boxes, src_loc_at=1), name_map
|
||||
|
||||
|
||||
def convert(*args, black_boxes=None, **kwargs):
|
||||
rtlil_text = rtlil.convert(*args, **kwargs)
|
||||
return _convert_rtlil_text(rtlil_text, black_boxes, src_loc_at=1)
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.back.cxxrtl, use amaranth.back.cxxrtl",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,11 +1,7 @@
|
|||
from amaranth.back.pysim import *
|
||||
from amaranth.back.pysim import __all__
|
||||
|
||||
|
||||
import warnings
|
||||
|
||||
from ..sim import *
|
||||
|
||||
|
||||
__all__ = ["Settle", "Delay", "Tick", "Passive", "Active", "Simulator"]
|
||||
|
||||
|
||||
# TODO(nmigen-0.4): remove
|
||||
warnings.warn("instead of nmigen.back.pysim.*, use nmigen.sim.*",
|
||||
warnings.warn("instead of nmigen.back.pysim, use amaranth.back.pysim",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
1059
nmigen/back/rtlil.py
1059
nmigen/back/rtlil.py
File diff suppressed because it is too large
Load diff
|
|
@ -1,61 +1,7 @@
|
|||
from .._toolchain.yosys import *
|
||||
from . import rtlil
|
||||
from amaranth.back.verilog import *
|
||||
from amaranth.back.verilog import __all__
|
||||
|
||||
|
||||
__all__ = ["YosysError", "convert", "convert_fragment"]
|
||||
|
||||
|
||||
def _convert_rtlil_text(rtlil_text, *, strip_internal_attrs=False, write_verilog_opts=()):
|
||||
# this version requirement needs to be synchronized with the one in setup.py!
|
||||
yosys = find_yosys(lambda ver: ver >= (0, 9))
|
||||
yosys_version = yosys.version()
|
||||
|
||||
script = []
|
||||
script.append("read_ilang <<rtlil\n{}\nrtlil".format(rtlil_text))
|
||||
|
||||
if yosys_version >= (0, 9, 3468):
|
||||
# Yosys >=0.9+3468 (since commit 128522f1) emits the workaround for the `always @*`
|
||||
# initial scheduling issue on its own.
|
||||
script.append("delete w:$verilog_initial_trigger")
|
||||
|
||||
if yosys_version >= (0, 9, 3527):
|
||||
# Yosys >=0.9+3527 (since commit 656ee70f) supports the `-nomux` option for the `proc`
|
||||
# script pass. Because the individual `proc_*` passes are not a stable interface,
|
||||
# `proc -nomux` is used instead, if available.
|
||||
script.append("proc -nomux")
|
||||
else:
|
||||
# On earlier versions, use individual `proc_*` passes; this is a known range of Yosys
|
||||
# versions and we know it's compatible with what nMigen does.
|
||||
script.append("proc_init")
|
||||
script.append("proc_arst")
|
||||
script.append("proc_dff")
|
||||
script.append("proc_clean")
|
||||
script.append("memory_collect")
|
||||
|
||||
if strip_internal_attrs:
|
||||
attr_map = []
|
||||
attr_map.append("-remove generator")
|
||||
attr_map.append("-remove top")
|
||||
attr_map.append("-remove src")
|
||||
attr_map.append("-remove nmigen.hierarchy")
|
||||
attr_map.append("-remove nmigen.decoding")
|
||||
script.append("attrmap {}".format(" ".join(attr_map)))
|
||||
script.append("attrmap -modattr {}".format(" ".join(attr_map)))
|
||||
|
||||
script.append("write_verilog -norename {}".format(" ".join(write_verilog_opts)))
|
||||
|
||||
return yosys.run(["-q", "-"], "\n".join(script),
|
||||
# At the moment, Yosys always shows a warning indicating that not all processes can be
|
||||
# translated to Verilog. We carefully emit only the processes that *can* be translated, and
|
||||
# squash this warning. Once Yosys' write_verilog pass is fixed, we should remove this.
|
||||
ignore_warnings=True)
|
||||
|
||||
|
||||
def convert_fragment(*args, strip_internal_attrs=False, **kwargs):
|
||||
rtlil_text, name_map = rtlil.convert_fragment(*args, **kwargs)
|
||||
return _convert_rtlil_text(rtlil_text, strip_internal_attrs=strip_internal_attrs), name_map
|
||||
|
||||
|
||||
def convert(*args, strip_internal_attrs=False, **kwargs):
|
||||
rtlil_text = rtlil.convert(*args, **kwargs)
|
||||
return _convert_rtlil_text(rtlil_text, strip_internal_attrs=strip_internal_attrs)
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.back.verilog, use amaranth.back.verilog",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,3 +1,6 @@
|
|||
from .dsl import *
|
||||
from .res import ResourceError
|
||||
from .plat import *
|
||||
from amaranth.build import *
|
||||
|
||||
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.build, use amaranth.build",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,256 +1,7 @@
|
|||
from collections import OrderedDict
|
||||
from amaranth.build.dsl import *
|
||||
from amaranth.build.dsl import __all__
|
||||
|
||||
|
||||
__all__ = ["Pins", "PinsN", "DiffPairs", "DiffPairsN",
|
||||
"Attrs", "Clock", "Subsignal", "Resource", "Connector"]
|
||||
|
||||
|
||||
class Pins:
|
||||
def __init__(self, names, *, dir="io", invert=False, conn=None, assert_width=None):
|
||||
if not isinstance(names, str):
|
||||
raise TypeError("Names must be a whitespace-separated string, not {!r}"
|
||||
.format(names))
|
||||
names = names.split()
|
||||
|
||||
if conn is not None:
|
||||
conn_name, conn_number = conn
|
||||
if not (isinstance(conn_name, str) and isinstance(conn_number, (int, str))):
|
||||
raise TypeError("Connector must be None or a pair of string (connector name) and "
|
||||
"integer/string (connector number), not {!r}"
|
||||
.format(conn))
|
||||
names = ["{}_{}:{}".format(conn_name, conn_number, name) for name in names]
|
||||
|
||||
if dir not in ("i", "o", "io", "oe"):
|
||||
raise TypeError("Direction must be one of \"i\", \"o\", \"oe\", or \"io\", not {!r}"
|
||||
.format(dir))
|
||||
|
||||
if assert_width is not None and len(names) != assert_width:
|
||||
raise AssertionError("{} names are specified ({}), but {} names are expected"
|
||||
.format(len(names), " ".join(names), assert_width))
|
||||
|
||||
self.names = names
|
||||
self.dir = dir
|
||||
self.invert = bool(invert)
|
||||
|
||||
def __len__(self):
|
||||
return len(self.names)
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.names)
|
||||
|
||||
def map_names(self, mapping, resource):
|
||||
mapped_names = []
|
||||
for name in self.names:
|
||||
while ":" in name:
|
||||
if name not in mapping:
|
||||
raise NameError("Resource {!r} refers to nonexistent connector pin {}"
|
||||
.format(resource, name))
|
||||
name = mapping[name]
|
||||
mapped_names.append(name)
|
||||
return mapped_names
|
||||
|
||||
def __repr__(self):
|
||||
return "(pins{} {} {})".format("-n" if self.invert else "",
|
||||
self.dir, " ".join(self.names))
|
||||
|
||||
|
||||
def PinsN(*args, **kwargs):
|
||||
return Pins(*args, invert=True, **kwargs)
|
||||
|
||||
|
||||
class DiffPairs:
|
||||
def __init__(self, p, n, *, dir="io", invert=False, conn=None, assert_width=None):
|
||||
self.p = Pins(p, dir=dir, conn=conn, assert_width=assert_width)
|
||||
self.n = Pins(n, dir=dir, conn=conn, assert_width=assert_width)
|
||||
|
||||
if len(self.p.names) != len(self.n.names):
|
||||
raise TypeError("Positive and negative pins must have the same width, but {!r} "
|
||||
"and {!r} do not"
|
||||
.format(self.p, self.n))
|
||||
|
||||
self.dir = dir
|
||||
self.invert = bool(invert)
|
||||
|
||||
def __len__(self):
|
||||
return len(self.p.names)
|
||||
|
||||
def __iter__(self):
|
||||
return zip(self.p.names, self.n.names)
|
||||
|
||||
def __repr__(self):
|
||||
return "(diffpairs{} {} (p {}) (n {}))".format("-n" if self.invert else "",
|
||||
self.dir, " ".join(self.p.names), " ".join(self.n.names))
|
||||
|
||||
|
||||
def DiffPairsN(*args, **kwargs):
|
||||
return DiffPairs(*args, invert=True, **kwargs)
|
||||
|
||||
|
||||
class Attrs(OrderedDict):
|
||||
def __init__(self, **attrs):
|
||||
for key, value in attrs.items():
|
||||
if not (value is None or isinstance(value, (str, int)) or hasattr(value, "__call__")):
|
||||
raise TypeError("Value of attribute {} must be None, int, str, or callable, "
|
||||
"not {!r}"
|
||||
.format(key, value))
|
||||
|
||||
super().__init__(**attrs)
|
||||
|
||||
def __repr__(self):
|
||||
items = []
|
||||
for key, value in self.items():
|
||||
if value is None:
|
||||
items.append("!" + key)
|
||||
else:
|
||||
items.append(key + "=" + repr(value))
|
||||
return "(attrs {})".format(" ".join(items))
|
||||
|
||||
|
||||
class Clock:
|
||||
def __init__(self, frequency):
|
||||
if not isinstance(frequency, (float, int)):
|
||||
raise TypeError("Clock frequency must be a number")
|
||||
|
||||
self.frequency = float(frequency)
|
||||
|
||||
@property
|
||||
def period(self):
|
||||
return 1 / self.frequency
|
||||
|
||||
def __repr__(self):
|
||||
return "(clock {})".format(self.frequency)
|
||||
|
||||
|
||||
class Subsignal:
|
||||
def __init__(self, name, *args):
|
||||
self.name = name
|
||||
self.ios = []
|
||||
self.attrs = Attrs()
|
||||
self.clock = None
|
||||
|
||||
if not args:
|
||||
raise ValueError("Missing I/O constraints")
|
||||
for arg in args:
|
||||
if isinstance(arg, (Pins, DiffPairs)):
|
||||
if not self.ios:
|
||||
self.ios.append(arg)
|
||||
else:
|
||||
raise TypeError("Pins and DiffPairs are incompatible with other location or "
|
||||
"subsignal constraints, but {!r} appears after {!r}"
|
||||
.format(arg, self.ios[-1]))
|
||||
elif isinstance(arg, Subsignal):
|
||||
if not self.ios or isinstance(self.ios[-1], Subsignal):
|
||||
self.ios.append(arg)
|
||||
else:
|
||||
raise TypeError("Subsignal is incompatible with location constraints, but "
|
||||
"{!r} appears after {!r}"
|
||||
.format(arg, self.ios[-1]))
|
||||
elif isinstance(arg, Attrs):
|
||||
self.attrs.update(arg)
|
||||
elif isinstance(arg, Clock):
|
||||
if self.ios and isinstance(self.ios[-1], (Pins, DiffPairs)):
|
||||
if self.clock is None:
|
||||
self.clock = arg
|
||||
else:
|
||||
raise ValueError("Clock constraint can be applied only once")
|
||||
else:
|
||||
raise TypeError("Clock constraint can only be applied to Pins or DiffPairs, "
|
||||
"not {!r}"
|
||||
.format(self.ios[-1]))
|
||||
else:
|
||||
raise TypeError("Constraint must be one of Pins, DiffPairs, Subsignal, Attrs, "
|
||||
"or Clock, not {!r}"
|
||||
.format(arg))
|
||||
|
||||
def _content_repr(self):
|
||||
parts = []
|
||||
for io in self.ios:
|
||||
parts.append(repr(io))
|
||||
if self.clock is not None:
|
||||
parts.append(repr(self.clock))
|
||||
if self.attrs:
|
||||
parts.append(repr(self.attrs))
|
||||
return " ".join(parts)
|
||||
|
||||
def __repr__(self):
|
||||
return "(subsignal {} {})".format(self.name, self._content_repr())
|
||||
|
||||
|
||||
class Resource(Subsignal):
|
||||
@classmethod
|
||||
def family(cls, name_or_number, number=None, *, ios, default_name, name_suffix=""):
|
||||
# This constructor accepts two different forms:
|
||||
# 1. Number-only form:
|
||||
# Resource.family(0, default_name="name", ios=[Pins("A0 A1")])
|
||||
# 2. Name-and-number (name override) form:
|
||||
# Resource.family("override", 0, default_name="name", ios=...)
|
||||
# This makes it easier to build abstractions for resources, e.g. an SPIResource abstraction
|
||||
# could simply delegate to `Resource.family(*args, default_name="spi", ios=ios)`.
|
||||
# The name_suffix argument is meant to support creating resources with
|
||||
# similar names, such as spi_flash, spi_flash_2x, etc.
|
||||
if name_suffix: # Only add "_" if we actually have a suffix.
|
||||
name_suffix = "_" + name_suffix
|
||||
|
||||
if number is None: # name_or_number is number
|
||||
return cls(default_name + name_suffix, name_or_number, *ios)
|
||||
else: # name_or_number is name
|
||||
return cls(name_or_number + name_suffix, number, *ios)
|
||||
|
||||
def __init__(self, name, number, *args):
|
||||
super().__init__(name, *args)
|
||||
|
||||
self.number = number
|
||||
|
||||
def __repr__(self):
|
||||
return "(resource {} {} {})".format(self.name, self.number, self._content_repr())
|
||||
|
||||
|
||||
class Connector:
    """A named, numbered board connector mapping its own pin names to platform pins.

    ``io`` is either a dict mapping connector pin names to platform pin names,
    or a whitespace-separated string of platform pins (numbered from 1, with
    ``-`` marking unconnected positions). If ``conn`` is given as a
    ``(name, number)`` pair, this connector is plugged into another connector,
    and every platform pin is rewritten as a reference through it.
    """

    def __init__(self, name, number, io, *, conn=None):
        self.name = name
        self.number = number

        mapping = OrderedDict()
        if isinstance(io, dict):
            # Explicit mapping: both sides must be strings.
            for conn_pin, plat_pin in io.items():
                if not isinstance(conn_pin, str):
                    raise TypeError("Connector pin name must be a string, not {!r}"
                                    .format(conn_pin))
                if not isinstance(plat_pin, str):
                    raise TypeError("Platform pin name must be a string, not {!r}"
                                    .format(plat_pin))
                mapping[conn_pin] = plat_pin
        elif isinstance(io, str):
            # Positional list: pins are numbered from 1; "-" is a gap.
            for index, plat_pin in enumerate(io.split(), start=1):
                if plat_pin != "-":
                    mapping[str(index)] = plat_pin
        else:
            raise TypeError("Connector I/Os must be a dictionary or a string, not {!r}"
                            .format(io))

        if conn is not None:
            conn_name, conn_number = conn
            if not (isinstance(conn_name, str) and isinstance(conn_number, (int, str))):
                raise TypeError("Connector must be None or a pair of string (connector name) and "
                                "integer/string (connector number), not {!r}"
                                .format(conn))
            # Rewrite platform pins to go through the parent connector.
            for conn_pin, plat_pin in mapping.items():
                mapping[conn_pin] = "{}_{}:{}".format(conn_name, conn_number, plat_pin)

        self.mapping = mapping

    def __repr__(self):
        pin_reprs = ("{}=>{}".format(conn, plat)
                     for conn, plat in self.mapping.items())
        return "(connector {} {} {})".format(self.name, self.number, " ".join(pin_reprs))

    def __len__(self):
        return len(self.mapping)

    def __iter__(self):
        # Yield (qualified connector pin, platform pin) pairs.
        for conn_pin, plat_pin in self.mapping.items():
            yield "{}_{}:{}".format(self.name, self.number, conn_pin), plat_pin
|
||||
# This module was renamed as part of the nMigen -> Amaranth rename; warn users
# to import the new location. stacklevel=2 attributes the warning to the
# importing module rather than to this shim.
import warnings
warnings.warn("instead of nmigen.build.dsl, use amaranth.build.dsl",
              DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,444 +1,7 @@
|
|||
from collections import OrderedDict
|
||||
from abc import ABCMeta, abstractmethod, abstractproperty
|
||||
import os
|
||||
import textwrap
|
||||
import re
|
||||
import jinja2
|
||||
from amaranth.build.plat import *
|
||||
from amaranth.build.plat import __all__
|
||||
|
||||
from .. import __version__
|
||||
from .._toolchain import *
|
||||
from ..hdl import *
|
||||
from ..hdl.xfrm import SampleLowerer, DomainLowerer
|
||||
from ..lib.cdc import ResetSynchronizer
|
||||
from ..back import rtlil, verilog
|
||||
from .res import *
|
||||
from .run import *
|
||||
|
||||
|
||||
__all__ = ["Platform", "TemplatedPlatform"]
|
||||
|
||||
|
||||
class Platform(ResourceManager, metaclass=ABCMeta):
    """Abstract base class for build targets.

    Combines a resource/connector database (via :class:`ResourceManager`) with
    an abstract toolchain flow: concrete platforms define ``resources``,
    ``connectors`` and ``required_tools``, and implement
    :meth:`toolchain_prepare` (and optionally :meth:`toolchain_program`).
    """

    # Filled in by concrete platform subclasses.
    resources = abstractproperty()
    connectors = abstractproperty()
    # Names of resources used to drive the implicitly created "sync" domain.
    default_clk = None
    default_rst = None
    required_tools = abstractproperty()

    def __init__(self):
        super().__init__(self.resources, self.connectors)

        # Extra files (filename -> str/bytes) to be included in the build plan.
        self.extra_files = OrderedDict()

        # A platform instance may be used for at most one build; see prepare().
        self._prepared = False

    @property
    def default_clk_constraint(self):
        """Clock constraint of the ``default_clk`` resource; raises if undefined."""
        if self.default_clk is None:
            raise AttributeError("Platform '{}' does not define a default clock"
                                 .format(type(self).__name__))
        return self.lookup(self.default_clk).clock

    @property
    def default_clk_frequency(self):
        """Frequency of the default clock constraint; raises if unconstrained."""
        constraint = self.default_clk_constraint
        if constraint is None:
            raise AttributeError("Platform '{}' does not constrain its default clock"
                                 .format(type(self).__name__))
        return constraint.frequency

    def add_file(self, filename, content):
        """Add an extra file to the build plan.

        ``content`` may be str, bytes, or a file-like object (read eagerly).
        Re-adding the same filename is only allowed with identical content.
        """
        if not isinstance(filename, str):
            raise TypeError("File name must be a string, not {!r}"
                            .format(filename))
        if hasattr(content, "read"):
            content = content.read()
        elif not isinstance(content, (str, bytes)):
            raise TypeError("File contents must be str, bytes, or a file-like object, not {!r}"
                            .format(content))
        if filename in self.extra_files:
            if self.extra_files[filename] != content:
                raise ValueError("File {!r} already exists"
                                 .format(filename))
        else:
            self.extra_files[filename] = content

    def iter_files(self, *suffixes):
        """Yield names of extra files whose name ends with any of ``suffixes``."""
        for filename in self.extra_files:
            if filename.endswith(suffixes):
                yield filename

    @property
    def _toolchain_env_var(self):
        # NOTE(review): the environment variable keeps the historical NMIGEN
        # prefix even after the Amaranth rename — confirm this is intentional
        # for backward compatibility.
        return f"NMIGEN_ENV_{self.toolchain}"

    def build(self, elaboratable, name="top",
              build_dir="build", do_build=True,
              program_opts=None, do_program=False,
              **kwargs):
        """Elaborate, build and optionally program ``elaboratable``.

        Returns the :class:`BuildPlan` if ``do_build`` is false, the build
        products if ``do_program`` is false, and ``None`` after programming.
        """
        # The following code performs a best-effort check for presence of required tools upfront,
        # before performing any build actions, to provide a better diagnostic. It does not handle
        # several corner cases:
        #  1. `require_tool` does not source toolchain environment scripts, so if such a script
        #     is used, the check is skipped, and `execute_local()` may fail;
        #  2. if the design is not built (do_build=False), most of the tools are not required and
        #     in fact might not be available if the design will be built manually with a different
        #     environment script specified, or on a different machine; however, Yosys is required
        #     by virtually every platform anyway, to provide debug Verilog output, and `prepare()`
        #     may fail.
        # This is OK because even if `require_tool` succeeds, the toolchain might be broken anyway.
        # The check only serves to catch common errors earlier.
        if do_build and self._toolchain_env_var not in os.environ:
            for tool in self.required_tools:
                require_tool(tool)

        plan = self.prepare(elaboratable, name, **kwargs)
        if not do_build:
            return plan

        products = plan.execute_local(build_dir)
        if not do_program:
            return products

        self.toolchain_program(products, name, **(program_opts or {}))

    def has_required_tools(self):
        """Return True if all required tools are available (or an env script is set)."""
        if self._toolchain_env_var in os.environ:
            return True
        return all(has_tool(name) for name in self.required_tools)

    def create_missing_domain(self, name):
        # Simple instantiation of a clock domain driven directly by the board clock and reset.
        # This implementation uses a single ResetSynchronizer to ensure that:
        #   * an external reset is definitely synchronized to the system clock;
        #   * release of power-on reset, which is inherently asynchronous, is synchronized to
        #     the system clock.
        # Many device families provide advanced primitives for tackling reset. If these exist,
        # they should be used instead.
        if name == "sync" and self.default_clk is not None:
            clk_i = self.request(self.default_clk).i
            if self.default_rst is not None:
                rst_i = self.request(self.default_rst).i
            else:
                rst_i = Const(0)

            m = Module()
            m.domains += ClockDomain("sync")
            m.d.comb += ClockSignal("sync").eq(clk_i)
            m.submodules.reset_sync = ResetSynchronizer(rst_i, domain="sync")
            return m

    def prepare(self, elaboratable, name="top", **kwargs):
        """Elaborate the design, attach pin buffers, and hand off to the toolchain.

        May be called at most once per platform instance, because requesting
        resources mutates platform state.
        """
        assert not self._prepared
        self._prepared = True

        fragment = Fragment.get(elaboratable, self)
        fragment = SampleLowerer()(fragment)
        fragment._propagate_domains(self.create_missing_domain, platform=self)
        fragment = DomainLowerer()(fragment)

        def add_pin_fragment(pin, pin_fragment):
            # Wrap each pin buffer as a subfragment; flatten non-Instance
            # buffers so they do not add hierarchy.
            pin_fragment = Fragment.get(pin_fragment, self)
            if not isinstance(pin_fragment, Instance):
                pin_fragment.flatten = True
            fragment.add_subfragment(pin_fragment, name="pin_{}".format(pin.name))

        for pin, port, attrs, invert in self.iter_single_ended_pins():
            if pin.dir == "i":
                add_pin_fragment(pin, self.get_input(pin, port, attrs, invert))
            if pin.dir == "o":
                add_pin_fragment(pin, self.get_output(pin, port, attrs, invert))
            if pin.dir == "oe":
                add_pin_fragment(pin, self.get_tristate(pin, port, attrs, invert))
            if pin.dir == "io":
                add_pin_fragment(pin, self.get_input_output(pin, port, attrs, invert))

        for pin, port, attrs, invert in self.iter_differential_pins():
            if pin.dir == "i":
                add_pin_fragment(pin, self.get_diff_input(pin, port, attrs, invert))
            if pin.dir == "o":
                add_pin_fragment(pin, self.get_diff_output(pin, port, attrs, invert))
            if pin.dir == "oe":
                add_pin_fragment(pin, self.get_diff_tristate(pin, port, attrs, invert))
            if pin.dir == "io":
                add_pin_fragment(pin, self.get_diff_input_output(pin, port, attrs, invert))

        fragment._propagate_ports(ports=self.iter_ports(), all_undef_as_ports=False)
        return self.toolchain_prepare(fragment, name, **kwargs)

    @abstractmethod
    def toolchain_prepare(self, fragment, name, **kwargs):
        """
        Convert the ``fragment`` and constraints recorded in this :class:`Platform` into
        a :class:`BuildPlan`.
        """
        raise NotImplementedError # :nocov:

    def toolchain_program(self, products, name, **kwargs):
        """
        Extract bitstream for fragment ``name`` from ``products`` and download it to a target.
        """
        raise NotImplementedError("Platform '{}' does not support programming"
                                  .format(type(self).__name__))

    def _check_feature(self, feature, pin, attrs, valid_xdrs, valid_attrs):
        # Shared validation for the get_*() buffer hooks: reject unsupported
        # features, data rates, and attributes with a uniform diagnostic.
        if len(valid_xdrs) == 0:
            raise NotImplementedError("Platform '{}' does not support {}"
                                      .format(type(self).__name__, feature))
        elif pin.xdr not in valid_xdrs:
            raise NotImplementedError("Platform '{}' does not support {} for XDR {}"
                                      .format(type(self).__name__, feature, pin.xdr))

        if not valid_attrs and attrs:
            raise NotImplementedError("Platform '{}' does not support attributes for {}"
                                      .format(type(self).__name__, feature))

    @staticmethod
    def _invert_if(invert, value):
        # Apply polarity inversion requested by the resource declaration.
        if invert:
            return ~value
        else:
            return value

    def get_input(self, pin, port, attrs, invert):
        """Default single-ended input buffer: direct (possibly inverted) connection."""
        self._check_feature("single-ended input", pin, attrs,
                            valid_xdrs=(0,), valid_attrs=None)

        m = Module()
        m.d.comb += pin.i.eq(self._invert_if(invert, port))
        return m

    def get_output(self, pin, port, attrs, invert):
        """Default single-ended output buffer: direct (possibly inverted) connection."""
        self._check_feature("single-ended output", pin, attrs,
                            valid_xdrs=(0,), valid_attrs=None)

        m = Module()
        m.d.comb += port.eq(self._invert_if(invert, pin.o))
        return m

    def get_tristate(self, pin, port, attrs, invert):
        """Default tristate buffer, implemented with a Yosys ``$tribuf`` cell."""
        self._check_feature("single-ended tristate", pin, attrs,
                            valid_xdrs=(0,), valid_attrs=None)

        m = Module()
        m.submodules += Instance("$tribuf",
            p_WIDTH=pin.width,
            i_EN=pin.oe,
            i_A=self._invert_if(invert, pin.o),
            o_Y=port,
        )
        return m

    def get_input_output(self, pin, port, attrs, invert):
        """Default bidirectional buffer: ``$tribuf`` for output plus input tap."""
        self._check_feature("single-ended input/output", pin, attrs,
                            valid_xdrs=(0,), valid_attrs=None)

        m = Module()
        m.submodules += Instance("$tribuf",
            p_WIDTH=pin.width,
            i_EN=pin.oe,
            i_A=self._invert_if(invert, pin.o),
            o_Y=port,
        )
        m.d.comb += pin.i.eq(self._invert_if(invert, port))
        return m

    def get_diff_input(self, pin, port, attrs, invert):
        # Differential buffers have no generic implementation: valid_xdrs=()
        # makes _check_feature raise unless a subclass overrides this hook.
        self._check_feature("differential input", pin, attrs,
                            valid_xdrs=(), valid_attrs=None)

    def get_diff_output(self, pin, port, attrs, invert):
        # See get_diff_input: always raises unless overridden.
        self._check_feature("differential output", pin, attrs,
                            valid_xdrs=(), valid_attrs=None)

    def get_diff_tristate(self, pin, port, attrs, invert):
        # See get_diff_input: always raises unless overridden.
        self._check_feature("differential tristate", pin, attrs,
                            valid_xdrs=(), valid_attrs=None)

    def get_diff_input_output(self, pin, port, attrs, invert):
        # See get_diff_input: always raises unless overridden.
        self._check_feature("differential input/output", pin, attrs,
                            valid_xdrs=(), valid_attrs=None)
|
||||
|
||||
|
||||
class TemplatedPlatform(Platform):
    """Platform that drives its toolchain through Jinja2-templated files and commands.

    Subclasses provide ``file_templates`` (rendered into build files) and
    ``command_templates`` (rendered into shell/batch commands); this class
    renders them into a :class:`BuildPlan` together with wrapper build scripts.
    """

    toolchain = abstractproperty()
    file_templates = abstractproperty()
    command_templates = abstractproperty()

    # Wrapper scripts for POSIX sh and Windows cmd; templates are dedented by
    # render() before compilation, so the indentation here is cosmetic.
    build_script_templates = {
        "build_{{name}}.sh": """
            # {{autogenerated}}
            set -e{{verbose("x")}}
            [ -n "${{platform._toolchain_env_var}}" ] && . "${{platform._toolchain_env_var}}"
            {{emit_commands("sh")}}
        """,
        "build_{{name}}.bat": """
            @rem {{autogenerated}}
            {{quiet("@echo off")}}
            if defined {{platform._toolchain_env_var}} call %{{platform._toolchain_env_var}}%
            {{emit_commands("bat")}}
        """,
    }

    def iter_clock_constraints(self):
        for net_signal, port_signal, frequency in super().iter_clock_constraints():
            # Skip any clock constraints placed on signals that are never used in the design.
            # Otherwise, it will cause a crash in the vendor platform if it supports clock
            # constraints on non-port nets.
            if net_signal not in self._name_map:
                continue
            yield net_signal, port_signal, frequency

    def toolchain_prepare(self, fragment, name, **kwargs):
        """Render all file and command templates into a :class:`BuildPlan`.

        ``kwargs`` provide template variable overrides, which can also be
        supplied via ``NMIGEN_<var>`` environment variables.
        """
        # Restrict the name of the design to a strict alphanumeric character set. Platforms will
        # interpolate the name of the design in many different contexts: filesystem paths, Python
        # scripts, Tcl scripts, ad-hoc constraint files, and so on. It is not practical to add
        # escaping code that handles every one of their edge cases, so make sure we never hit them
        # in the first place.
        invalid_char = re.match(r"[^A-Za-z0-9_]", name)
        if invalid_char:
            raise ValueError("Design name {!r} contains invalid character {!r}; only alphanumeric "
                             "characters are valid in design names"
                             .format(name, invalid_char.group(0)))

        # This notice serves a dual purpose: to explain that the file is autogenerated,
        # and to incorporate the nMigen version into generated code.
        autogenerated = "Automatically generated by nMigen {}. Do not edit.".format(__version__)

        rtlil_text, self._name_map = rtlil.convert_fragment(fragment, name=name)

        def emit_rtlil():
            # The design as RTLIL, used directly by Yosys-based flows.
            return rtlil_text

        def emit_verilog(opts=()):
            return verilog._convert_rtlil_text(rtlil_text,
                strip_internal_attrs=True, write_verilog_opts=opts)

        def emit_debug_verilog(opts=()):
            # Keeps internal attributes to aid debugging of generated output.
            return verilog._convert_rtlil_text(rtlil_text,
                strip_internal_attrs=False, write_verilog_opts=opts)

        def emit_commands(syntax):
            commands = []

            # First, default each tool's environment variable to the tool name
            # if it was not set by the user.
            for name in self.required_tools:
                env_var = tool_env_var(name)
                if syntax == "sh":
                    template = ": ${{{env_var}:={name}}}"
                elif syntax == "bat":
                    template = \
                        "if [%{env_var}%] equ [\"\"] set {env_var}=\n" \
                        "if [%{env_var}%] equ [] set {env_var}={name}"
                else:
                    assert False
                commands.append(template.format(env_var=env_var, name=name))

            # Then, render each command template; collapse whitespace so
            # multi-line templates become single commands.
            for index, command_tpl in enumerate(self.command_templates):
                command = render(command_tpl, origin="<command#{}>".format(index + 1),
                                 syntax=syntax)
                command = re.sub(r"\s+", " ", command)
                if syntax == "sh":
                    commands.append(command)
                elif syntax == "bat":
                    commands.append(command + " || exit /b")
                else:
                    assert False

            return "\n".join(commands)

        def get_override(var):
            # Environment variables take precedence over keyword arguments.
            var_env = "NMIGEN_{}".format(var)
            if var_env in os.environ:
                # On Windows, there is no way to define an "empty but set" variable; it is tempting
                # to use a quoted empty string, but it doesn't do what one would expect. Recognize
                # this as a useful pattern anyway, and treat `set VAR=""` on Windows the same way
                # `export VAR=` is treated on Linux.
                return re.sub(r'^\"\"$', "", os.environ[var_env])
            elif var in kwargs:
                if isinstance(kwargs[var], str):
                    return textwrap.dedent(kwargs[var]).strip()
                else:
                    return kwargs[var]
            else:
                return jinja2.Undefined(name=var)

        @jinja2.contextfunction
        def invoke_tool(context, name):
            # Expand to the shell/batch reference of the tool's env variable.
            env_var = tool_env_var(name)
            if context.parent["syntax"] == "sh":
                return "\"${}\"".format(env_var)
            elif context.parent["syntax"] == "bat":
                return "%{}%".format(env_var)
            else:
                assert False

        def options(opts):
            # Accept either a pre-formatted string or an iterable of options.
            if isinstance(opts, str):
                return opts
            else:
                return " ".join(opts)

        def hierarchy(signal, separator):
            # Hierarchical name of a signal in the design, minus the top level.
            return separator.join(self._name_map[signal][1:])

        def ascii_escape(string):
            # Replace non-alphanumeric characters with their hex codes.
            def escape_one(match):
                if match.group(1) is None:
                    return match.group(2)
                else:
                    return "_{:02x}_".format(ord(match.group(1)[0]))
            return "".join(escape_one(m) for m in re.finditer(r"([^A-Za-z0-9_])|(.)", string))

        def tcl_escape(string):
            return "{" + re.sub(r"([{}\\])", r"\\\1", string) + "}"

        def tcl_quote(string):
            return '"' + re.sub(r"([$[\\])", r"\\\1", string) + '"'

        def verbose(arg):
            # Emits `arg` only in verbose mode; undefined otherwise.
            if get_override("verbose"):
                return arg
            else:
                return jinja2.Undefined(name="quiet")

        def quiet(arg):
            # Complement of verbose(): emits `arg` only when not verbose.
            if get_override("verbose"):
                return jinja2.Undefined(name="quiet")
            else:
                return arg

        def render(source, origin, syntax=None):
            # Compile and render a template, attributing syntax errors to
            # `origin` for better diagnostics.
            try:
                source = textwrap.dedent(source).strip()
                compiled = jinja2.Template(source,
                    trim_blocks=True, lstrip_blocks=True, undefined=jinja2.StrictUndefined)
                compiled.environment.filters["options"] = options
                compiled.environment.filters["hierarchy"] = hierarchy
                compiled.environment.filters["ascii_escape"] = ascii_escape
                compiled.environment.filters["tcl_escape"] = tcl_escape
                compiled.environment.filters["tcl_quote"] = tcl_quote
            except jinja2.TemplateSyntaxError as e:
                e.args = ("{} (at {}:{})".format(e.message, origin, e.lineno),)
                raise
            return compiled.render({
                "name": name,
                "platform": self,
                "emit_rtlil": emit_rtlil,
                "emit_verilog": emit_verilog,
                "emit_debug_verilog": emit_debug_verilog,
                "emit_commands": emit_commands,
                "syntax": syntax,
                "invoke_tool": invoke_tool,
                "get_override": get_override,
                "verbose": verbose,
                "quiet": quiet,
                "autogenerated": autogenerated,
            })

        plan = BuildPlan(script="build_{}".format(name))
        for filename_tpl, content_tpl in self.file_templates.items():
            plan.add_file(render(filename_tpl, origin=filename_tpl),
                          render(content_tpl, origin=content_tpl))
        for filename, content in self.extra_files.items():
            plan.add_file(filename, content)
        return plan
|
||||
# This module was renamed as part of the nMigen -> Amaranth rename; warn users
# to import the new location. stacklevel=2 attributes the warning to the
# importing module rather than to this shim.
import warnings
warnings.warn("instead of nmigen.build.plat, use amaranth.build.plat",
              DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,256 +1,7 @@
|
|||
from collections import OrderedDict
|
||||
|
||||
from ..hdl.ast import *
|
||||
from ..hdl.rec import *
|
||||
from ..lib.io import *
|
||||
|
||||
from .dsl import *
|
||||
from amaranth.build.res import *
|
||||
from amaranth.build.res import __all__
|
||||
|
||||
|
||||
__all__ = ["ResourceError", "ResourceManager"]
|
||||
|
||||
|
||||
class ResourceError(Exception):
    """Raised when a resource lookup or request cannot be satisfied."""
|
||||
|
||||
|
||||
class ResourceManager:
    """Tracks board resources and connectors, and hands out requested pins.

    Keeps the set of declared resources/connectors, the resources already
    requested, the physical pins already claimed, and the accumulated port and
    clock constraints used later by the build flow.
    """

    def __init__(self, resources, connectors):
        # (name, number) -> Resource
        self.resources = OrderedDict()
        # (name, number) -> requested value (Pin/Record)
        self._requested = OrderedDict()
        # physical pin name -> requesting resource component name
        self._phys_reqd = OrderedDict()

        # (name, number) -> Connector
        self.connectors = OrderedDict()
        # qualified connector pin -> platform pin
        self._conn_pins = OrderedDict()

        # Constraint lists
        self._ports = []
        self._clocks = SignalDict()

        self.add_resources(resources)
        self.add_connectors(connectors)

    def add_resources(self, resources):
        """Register resources, rejecting duplicates of the same (name, number)."""
        for res in resources:
            if not isinstance(res, Resource):
                raise TypeError("Object {!r} is not a Resource".format(res))
            if (res.name, res.number) in self.resources:
                raise NameError("Trying to add {!r}, but {!r} has the same name and number"
                                .format(res, self.resources[res.name, res.number]))
            self.resources[res.name, res.number] = res

    def add_connectors(self, connectors):
        """Register connectors and index their pin mappings."""
        for conn in connectors:
            if not isinstance(conn, Connector):
                raise TypeError("Object {!r} is not a Connector".format(conn))
            if (conn.name, conn.number) in self.connectors:
                raise NameError("Trying to add {!r}, but {!r} has the same name and number"
                                .format(conn, self.connectors[conn.name, conn.number]))
            self.connectors[conn.name, conn.number] = conn

            for conn_pin, plat_pin in conn:
                assert conn_pin not in self._conn_pins
                self._conn_pins[conn_pin] = plat_pin

    def lookup(self, name, number=0):
        """Return the resource declared as (name, number); raise ResourceError if absent."""
        if (name, number) not in self.resources:
            raise ResourceError("Resource {}#{} does not exist"
                                .format(name, number))
        return self.resources[name, number]

    def request(self, name, number=0, *, dir=None, xdr=None):
        """Claim a resource, returning a Pin (or Record of Pins) for it.

        ``dir``/``xdr`` override direction and gearing per (sub)signal; each
        resource may be requested at most once.
        """
        resource = self.lookup(name, number)
        if (resource.name, resource.number) in self._requested:
            raise ResourceError("Resource {}#{} has already been requested"
                                .format(name, number))

        def merge_options(subsignal, dir, xdr):
            # Recursively fill in and validate dir/xdr against the resource
            # shape: dicts for subsignal trees, scalars for leaf pins.
            if isinstance(subsignal.ios[0], Subsignal):
                if dir is None:
                    dir = dict()
                if xdr is None:
                    xdr = dict()
                if not isinstance(dir, dict):
                    raise TypeError("Directions must be a dict, not {!r}, because {!r} "
                                    "has subsignals"
                                    .format(dir, subsignal))
                if not isinstance(xdr, dict):
                    raise TypeError("Data rate must be a dict, not {!r}, because {!r} "
                                    "has subsignals"
                                    .format(xdr, subsignal))
                for sub in subsignal.ios:
                    sub_dir = dir.get(sub.name, None)
                    sub_xdr = xdr.get(sub.name, None)
                    dir[sub.name], xdr[sub.name] = merge_options(sub, sub_dir, sub_xdr)
            else:
                if dir is None:
                    dir = subsignal.ios[0].dir
                if xdr is None:
                    xdr = 0
                if dir not in ("i", "o", "oe", "io", "-"):
                    raise TypeError("Direction must be one of \"i\", \"o\", \"oe\", \"io\", "
                                    "or \"-\", not {!r}"
                                    .format(dir))
                # Only narrowing "io" or disconnecting ("-") is allowed.
                if dir != subsignal.ios[0].dir and \
                        not (subsignal.ios[0].dir == "io" or dir == "-"):
                    raise ValueError("Direction of {!r} cannot be changed from \"{}\" to \"{}\"; "
                                     "direction can be changed from \"io\" to \"i\", \"o\", or "
                                     "\"oe\", or from anything to \"-\""
                                     .format(subsignal.ios[0], subsignal.ios[0].dir, dir))
                if not isinstance(xdr, int) or xdr < 0:
                    raise ValueError("Data rate of {!r} must be a non-negative integer, not {!r}"
                                     .format(subsignal.ios[0], xdr))
            return dir, xdr

        def resolve(resource, dir, xdr, name, attrs):
            # Evaluate callable attribute values against the platform; drop
            # attributes that resolve to None.
            # NOTE(review): deleting from `attrs` while iterating attrs.items()
            # raises RuntimeError on CPython 3 if an attribute resolves to
            # None — confirm whether this path is ever taken.
            for attr_key, attr_value in attrs.items():
                if hasattr(attr_value, "__call__"):
                    attr_value = attr_value(self)
                assert attr_value is None or isinstance(attr_value, str)
                if attr_value is None:
                    del attrs[attr_key]
                else:
                    attrs[attr_key] = attr_value

            if isinstance(resource.ios[0], Subsignal):
                # Composite resource: resolve each subsignal and pack the
                # results into a Record, merging attributes downward.
                fields = OrderedDict()
                for sub in resource.ios:
                    fields[sub.name] = resolve(sub, dir[sub.name], xdr[sub.name],
                                               name="{}__{}".format(name, sub.name),
                                               attrs={**attrs, **sub.attrs})
                return Record([
                    (f_name, f.layout) for (f_name, f) in fields.items()
                ], fields=fields, name=name)

            elif isinstance(resource.ios[0], (Pins, DiffPairs)):
                phys = resource.ios[0]
                if isinstance(phys, Pins):
                    phys_names = phys.names
                    port = Record([("io", len(phys))], name=name)
                if isinstance(phys, DiffPairs):
                    phys_names = []
                    record_fields = []
                    # Platforms may drop the p or n half (e.g. pseudo-differential).
                    if not self.should_skip_port_component(None, attrs, "p"):
                        phys_names += phys.p.names
                        record_fields.append(("p", len(phys)))
                    if not self.should_skip_port_component(None, attrs, "n"):
                        phys_names += phys.n.names
                        record_fields.append(("n", len(phys)))
                    port = Record(record_fields, name=name)
                if dir == "-":
                    # Raw port requested: no Pin buffer is created.
                    pin = None
                else:
                    pin = Pin(len(phys), dir, xdr=xdr, name=name)

                for phys_name in phys_names:
                    if phys_name in self._phys_reqd:
                        raise ResourceError("Resource component {} uses physical pin {}, but it "
                                            "is already used by resource component {} that was "
                                            "requested earlier"
                                            .format(name, phys_name, self._phys_reqd[phys_name]))
                    self._phys_reqd[phys_name] = name

                self._ports.append((resource, pin, port, attrs))

                # Resources declared with Clock() carry their constraint onto
                # the requested input pin.
                if pin is not None and resource.clock is not None:
                    self.add_clock_constraint(pin.i, resource.clock.frequency)

                return pin if pin is not None else port

            else:
                assert False # :nocov:

        value = resolve(resource,
            *merge_options(resource, dir, xdr),
            name="{}_{}".format(resource.name, resource.number),
            attrs=resource.attrs)
        self._requested[resource.name, resource.number] = value
        return value

    def iter_single_ended_pins(self):
        """Yield (pin, port, attrs, invert) for requested Pins-based resources."""
        for res, pin, port, attrs in self._ports:
            if pin is None:
                continue
            if isinstance(res.ios[0], Pins):
                yield pin, port, attrs, res.ios[0].invert

    def iter_differential_pins(self):
        """Yield (pin, port, attrs, invert) for requested DiffPairs-based resources."""
        for res, pin, port, attrs in self._ports:
            if pin is None:
                continue
            if isinstance(res.ios[0], DiffPairs):
                yield pin, port, attrs, res.ios[0].invert

    def should_skip_port_component(self, port, attrs, component):
        # Hook for platforms to omit a port component (e.g. the "n" half of a
        # pseudo-differential pair); the default keeps everything.
        return False

    def iter_ports(self):
        """Yield all top-level port signals of requested resources."""
        for res, pin, port, attrs in self._ports:
            if isinstance(res.ios[0], Pins):
                if not self.should_skip_port_component(port, attrs, "io"):
                    yield port.io
            elif isinstance(res.ios[0], DiffPairs):
                if not self.should_skip_port_component(port, attrs, "p"):
                    yield port.p
                if not self.should_skip_port_component(port, attrs, "n"):
                    yield port.n
            else:
                assert False

    def iter_port_constraints(self):
        """Yield (port name, physical pin names, attrs) for each requested port."""
        for res, pin, port, attrs in self._ports:
            if isinstance(res.ios[0], Pins):
                if not self.should_skip_port_component(port, attrs, "io"):
                    yield port.io.name, res.ios[0].map_names(self._conn_pins, res), attrs
            elif isinstance(res.ios[0], DiffPairs):
                if not self.should_skip_port_component(port, attrs, "p"):
                    yield port.p.name, res.ios[0].p.map_names(self._conn_pins, res), attrs
                if not self.should_skip_port_component(port, attrs, "n"):
                    yield port.n.name, res.ios[0].n.map_names(self._conn_pins, res), attrs
            else:
                assert False

    def iter_port_constraints_bits(self):
        """Like iter_port_constraints, but expanded to one entry per bit."""
        for port_name, pin_names, attrs in self.iter_port_constraints():
            if len(pin_names) == 1:
                yield port_name, pin_names[0], attrs
            else:
                for bit, pin_name in enumerate(pin_names):
                    yield "{}[{}]".format(port_name, bit), pin_name, attrs

    def add_clock_constraint(self, clock, frequency):
        """Constrain ``clock`` (a Signal) to ``frequency`` Hz; at most one per signal."""
        if not isinstance(clock, Signal):
            raise TypeError("Object {!r} is not a Signal".format(clock))
        if not isinstance(frequency, (int, float)):
            raise TypeError("Frequency must be a number, not {!r}".format(frequency))

        if clock in self._clocks:
            raise ValueError("Cannot add clock constraint on {!r}, which is already constrained "
                             "to {} Hz"
                             .format(clock, self._clocks[clock]))
        else:
            self._clocks[clock] = float(frequency)

    def iter_clock_constraints(self):
        """Yield (net signal, port signal or None, frequency) for each constraint."""
        # Back-propagate constraints through the input buffer. For clock constraints on pins
        # (the majority of cases), toolchains work better if the constraint is defined on the pin
        # and not on the buffered internal net; and if the toolchain is advanced enough that
        # it considers clock phase and delay of the input buffer, it is *necessary* to define
        # the constraint on the pin to match the designer's expectation of phase being referenced
        # to the pin.
        #
        # Constraints on nets with no corresponding input pin (e.g. PLL or SERDES outputs) are not
        # affected.
        pin_i_to_port = SignalDict()
        for res, pin, port, attrs in self._ports:
            if hasattr(pin, "i"):
                if isinstance(res.ios[0], Pins):
                    pin_i_to_port[pin.i] = port.io
                elif isinstance(res.ios[0], DiffPairs):
                    pin_i_to_port[pin.i] = port.p
                else:
                    assert False

        for net_signal, frequency in self._clocks.items():
            port_signal = pin_i_to_port.get(net_signal)
            yield net_signal, port_signal, frequency
|
||||
# This module was renamed as part of the nMigen -> Amaranth rename; warn users
# to import the new location. stacklevel=2 attributes the warning to the
# importing module rather than to this shim.
import warnings
warnings.warn("instead of nmigen.build.res, use amaranth.build.res",
              DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,268 +1,7 @@
|
|||
from collections import OrderedDict
|
||||
from contextlib import contextmanager
|
||||
from abc import ABCMeta, abstractmethod
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import tempfile
|
||||
import zipfile
|
||||
import hashlib
|
||||
import pathlib
|
||||
from amaranth.build.run import *
|
||||
from amaranth.build.run import __all__
|
||||
|
||||
|
||||
__all__ = ["BuildPlan", "BuildProducts", "LocalBuildProducts", "RemoteSSHBuildProducts"]
|
||||
|
||||
|
||||
|
||||
class BuildPlan:
|
||||
    def __init__(self, script):
        """A build plan.

        Parameters
        ----------
        script : str
            The base name (without extension) of the script that will be executed.
        """
        self.script = script
        # Filename (relative, /-separated) -> str or bytes content; ordered so
        # that iteration is deterministic.
        self.files = OrderedDict()
|
||||
|
||||
def add_file(self, filename, content):
|
||||
"""
|
||||
Add ``content``, which can be a :class:`str`` or :class:`bytes`, to the build plan
|
||||
as ``filename``. The file name can be a relative path with directories separated by
|
||||
forward slashes (``/``).
|
||||
"""
|
||||
assert isinstance(filename, str) and filename not in self.files
|
||||
self.files[filename] = content
|
||||
|
||||
    def digest(self, size=64):
        """
        Compute a `digest`, a short byte sequence deterministically and uniquely identifying
        this build plan.
        """
        hasher = hashlib.blake2b(digest_size=size)
        # Hash files in sorted order so the digest is independent of the order
        # in which files were added.
        for filename in sorted(self.files):
            hasher.update(filename.encode("utf-8"))
            content = self.files[filename]
            if isinstance(content, str):
                content = content.encode("utf-8")
            hasher.update(content)
        # The script name participates in the digest as well.
        hasher.update(self.script.encode("utf-8"))
        return hasher.digest()
|
||||
|
||||
    def archive(self, file):
        """
        Archive files from the build plan into ``file``, which can be either a filename, or
        a file-like object. The produced archive is deterministic: exact same files will
        always produce exact same archive.
        """
        with zipfile.ZipFile(file, "w") as archive:
            # Write archive members in deterministic order and with deterministic timestamp.
            # (A bare ZipInfo carries a fixed default date rather than "now".)
            for filename in sorted(self.files):
                archive.writestr(zipfile.ZipInfo(filename), self.files[filename])
|
||||
|
||||
    def execute_local(self, root="build", *, run_script=True):
        """
        Execute build plan using the local strategy. Files from the build plan are placed in
        the build root directory ``root``, and, if ``run_script`` is ``True``, the script
        appropriate for the platform (``{script}.bat`` on Windows, ``{script}.sh`` elsewhere) is
        executed in the build root.

        Returns :class:`LocalBuildProducts`.
        """
        os.makedirs(root, exist_ok=True)
        cwd = os.getcwd()
        try:
            # All file paths below are interpreted relative to the build root.
            os.chdir(root)

            for filename, content in self.files.items():
                filename = pathlib.Path(filename)
                # Forbid parent directory components completely to avoid the possibility
                # of writing outside the build root.
                assert ".." not in filename.parts
                dirname = os.path.dirname(filename)
                if dirname:
                    os.makedirs(dirname, exist_ok=True)

                # Text files are written as text, everything else as bytes.
                mode = "wt" if isinstance(content, str) else "wb"
                with open(filename, mode) as f:
                    f.write(content)

            if run_script:
                if sys.platform.startswith("win32"):
                    # Without "call", "cmd /c {}.bat" will return 0.
                    # See https://stackoverflow.com/a/30736987 for a detailed explanation of why.
                    # Running the script manually from a command prompt is unaffected.
                    subprocess.check_call(["cmd", "/c", "call {}.bat".format(self.script)])
                else:
                    subprocess.check_call(["sh", "{}.sh".format(self.script)])

            return LocalBuildProducts(os.getcwd())

        finally:
            # Always restore the original working directory, even on failure.
            os.chdir(cwd)
|
||||
|
||||
def execute_remote_ssh(self, *, connect_to = {}, root, run_script=True):
|
||||
"""
|
||||
Execute build plan using the remote SSH strategy. Files from the build
|
||||
plan are transferred via SFTP to the directory ``root`` on a remote
|
||||
server. If ``run_script`` is ``True``, the ``paramiko`` SSH client will
|
||||
then run ``{script}.sh``. ``root`` can either be an absolute or
|
||||
relative (to the login directory) path.
|
||||
|
||||
``connect_to`` is a dictionary that holds all input arguments to
|
||||
``paramiko``'s ``SSHClient.connect``
|
||||
(`documentation <http://docs.paramiko.org/en/stable/api/client.html#paramiko.client.SSHClient.connect>`_).
|
||||
At a minimum, the ``hostname`` input argument must be supplied in this
|
||||
dictionary as the remote server.
|
||||
|
||||
Returns :class:`RemoteSSHBuildProducts`.
|
||||
"""
|
||||
from paramiko import SSHClient
|
||||
|
||||
with SSHClient() as client:
|
||||
client.load_system_host_keys()
|
||||
client.connect(**connect_to)
|
||||
|
||||
with client.open_sftp() as sftp:
|
||||
def mkdir_exist_ok(path):
|
||||
try:
|
||||
sftp.mkdir(str(path))
|
||||
except IOError as e:
|
||||
# mkdir fails if directory exists. This is fine in nmigen.build.
|
||||
# Reraise errors containing e.errno info.
|
||||
if e.errno:
|
||||
raise e
|
||||
|
||||
def mkdirs(path):
|
||||
# Iteratively create parent directories of a file by iterating over all
|
||||
# parents except for the root ("."). Slicing the parents results in
|
||||
# TypeError, so skip over the root ("."); this also handles files
|
||||
# already in the root directory.
|
||||
for parent in reversed(path.parents):
|
||||
if parent == pathlib.PurePosixPath("."):
|
||||
continue
|
||||
else:
|
||||
mkdir_exist_ok(parent)
|
||||
|
||||
mkdir_exist_ok(root)
|
||||
|
||||
sftp.chdir(root)
|
||||
for filename, content in self.files.items():
|
||||
filename = pathlib.PurePosixPath(filename)
|
||||
assert ".." not in filename.parts
|
||||
|
||||
mkdirs(filename)
|
||||
|
||||
mode = "wt" if isinstance(content, str) else "wb"
|
||||
with sftp.file(str(filename), mode) as f:
|
||||
# "b/t" modifier ignored in SFTP.
|
||||
if mode == "wt":
|
||||
f.write(content.encode("utf-8"))
|
||||
else:
|
||||
f.write(content)
|
||||
|
||||
if run_script:
|
||||
transport = client.get_transport()
|
||||
channel = transport.open_session()
|
||||
channel.set_combine_stderr(True)
|
||||
|
||||
cmd = "if [ -f ~/.profile ]; then . ~/.profile; fi && cd {} && sh {}.sh".format(root, self.script)
|
||||
channel.exec_command(cmd)
|
||||
|
||||
# Show the output from the server while products are built.
|
||||
buf = channel.recv(1024)
|
||||
while buf:
|
||||
print(buf.decode("utf-8"), end="")
|
||||
buf = channel.recv(1024)
|
||||
|
||||
return RemoteSSHBuildProducts(connect_to, root)
|
||||
|
||||
def execute(self):
|
||||
"""
|
||||
Execute build plan using the default strategy. Use one of the ``execute_*`` methods
|
||||
explicitly to have more control over the strategy.
|
||||
"""
|
||||
return self.execute_local()
|
||||
|
||||
|
||||
class BuildProducts(metaclass=ABCMeta):
|
||||
@abstractmethod
|
||||
def get(self, filename, mode="b"):
|
||||
"""
|
||||
Extract ``filename`` from build products, and return it as a :class:`bytes` (if ``mode``
|
||||
is ``"b"``) or a :class:`str` (if ``mode`` is ``"t"``).
|
||||
"""
|
||||
assert mode in ("b", "t")
|
||||
|
||||
@contextmanager
|
||||
def extract(self, *filenames):
|
||||
"""
|
||||
Extract ``filenames`` from build products, place them in an OS-specific temporary file
|
||||
location, with the extension preserved, and delete them afterwards. This method is used
|
||||
as a context manager, e.g.: ::
|
||||
|
||||
with products.extract("bitstream.bin", "programmer.cfg") \
|
||||
as bitstream_filename, config_filename:
|
||||
subprocess.check_call(["program", "-c", config_filename, bitstream_filename])
|
||||
"""
|
||||
files = []
|
||||
try:
|
||||
for filename in filenames:
|
||||
# On Windows, a named temporary file (as created by Python) is not accessible to
|
||||
# others if it's still open within the Python process, so we close it and delete
|
||||
# it manually.
|
||||
file = tempfile.NamedTemporaryFile(
|
||||
prefix="nmigen_", suffix="_" + os.path.basename(filename),
|
||||
delete=False)
|
||||
files.append(file)
|
||||
file.write(self.get(filename))
|
||||
file.close()
|
||||
|
||||
if len(files) == 0:
|
||||
return (yield)
|
||||
elif len(files) == 1:
|
||||
return (yield files[0].name)
|
||||
else:
|
||||
return (yield [file.name for file in files])
|
||||
finally:
|
||||
for file in files:
|
||||
os.unlink(file.name)
|
||||
|
||||
|
||||
class LocalBuildProducts(BuildProducts):
|
||||
def __init__(self, root):
|
||||
# We provide no guarantees that files will be available on the local filesystem (i.e. in
|
||||
# any way other than through `products.get()`) in general, so downstream code must never
|
||||
# rely on this, even when we happen to use a local build most of the time.
|
||||
self.__root = root
|
||||
|
||||
def get(self, filename, mode="b"):
|
||||
super().get(filename, mode)
|
||||
with open(os.path.join(self.__root, filename), "r" + mode) as f:
|
||||
return f.read()
|
||||
|
||||
|
||||
class RemoteSSHBuildProducts(BuildProducts):
|
||||
def __init__(self, connect_to, root):
|
||||
self.__connect_to = connect_to
|
||||
self.__root = root
|
||||
|
||||
def get(self, filename, mode="b"):
|
||||
super().get(filename, mode)
|
||||
|
||||
from paramiko import SSHClient
|
||||
|
||||
with SSHClient() as client:
|
||||
client.load_system_host_keys()
|
||||
client.connect(**self.__connect_to)
|
||||
|
||||
with client.open_sftp() as sftp:
|
||||
sftp.chdir(self.__root)
|
||||
|
||||
with sftp.file(filename, "r" + mode) as f:
|
||||
# "b/t" modifier ignored in SFTP.
|
||||
if mode == "t":
|
||||
return f.read().decode("utf-8")
|
||||
else:
|
||||
return f.read()
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.build.run, use amaranth.build.run",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,78 +1,7 @@
|
|||
import argparse
|
||||
|
||||
from .hdl.ir import Fragment
|
||||
from .back import rtlil, cxxrtl, verilog
|
||||
from .sim import Simulator
|
||||
from amaranth.cli import *
|
||||
from amaranth.cli import __all__
|
||||
|
||||
|
||||
__all__ = ["main"]
|
||||
|
||||
|
||||
def main_parser(parser=None):
|
||||
if parser is None:
|
||||
parser = argparse.ArgumentParser()
|
||||
|
||||
p_action = parser.add_subparsers(dest="action")
|
||||
|
||||
p_generate = p_action.add_parser("generate",
|
||||
help="generate RTLIL, Verilog or CXXRTL from the design")
|
||||
p_generate.add_argument("-t", "--type", dest="generate_type",
|
||||
metavar="LANGUAGE", choices=["il", "cc", "v"],
|
||||
help="generate LANGUAGE (il for RTLIL, v for Verilog, cc for CXXRTL; default: file extension of FILE, if given)")
|
||||
p_generate.add_argument("generate_file",
|
||||
metavar="FILE", type=argparse.FileType("w"), nargs="?",
|
||||
help="write generated code to FILE")
|
||||
|
||||
p_simulate = p_action.add_parser(
|
||||
"simulate", help="simulate the design")
|
||||
p_simulate.add_argument("-v", "--vcd-file",
|
||||
metavar="VCD-FILE", type=argparse.FileType("w"),
|
||||
help="write execution trace to VCD-FILE")
|
||||
p_simulate.add_argument("-w", "--gtkw-file",
|
||||
metavar="GTKW-FILE", type=argparse.FileType("w"),
|
||||
help="write GTKWave configuration to GTKW-FILE")
|
||||
p_simulate.add_argument("-p", "--period", dest="sync_period",
|
||||
metavar="TIME", type=float, default=1e-6,
|
||||
help="set 'sync' clock domain period to TIME (default: %(default)s)")
|
||||
p_simulate.add_argument("-c", "--clocks", dest="sync_clocks",
|
||||
metavar="COUNT", type=int, required=True,
|
||||
help="simulate for COUNT 'sync' clock periods")
|
||||
|
||||
return parser
|
||||
|
||||
|
||||
def main_runner(parser, args, design, platform=None, name="top", ports=()):
|
||||
if args.action == "generate":
|
||||
fragment = Fragment.get(design, platform)
|
||||
generate_type = args.generate_type
|
||||
if generate_type is None and args.generate_file:
|
||||
if args.generate_file.name.endswith(".il"):
|
||||
generate_type = "il"
|
||||
if args.generate_file.name.endswith(".cc"):
|
||||
generate_type = "cc"
|
||||
if args.generate_file.name.endswith(".v"):
|
||||
generate_type = "v"
|
||||
if generate_type is None:
|
||||
parser.error("Unable to auto-detect language, specify explicitly with -t/--type")
|
||||
if generate_type == "il":
|
||||
output = rtlil.convert(fragment, name=name, ports=ports)
|
||||
if generate_type == "cc":
|
||||
output = cxxrtl.convert(fragment, name=name, ports=ports)
|
||||
if generate_type == "v":
|
||||
output = verilog.convert(fragment, name=name, ports=ports)
|
||||
if args.generate_file:
|
||||
args.generate_file.write(output)
|
||||
else:
|
||||
print(output)
|
||||
|
||||
if args.action == "simulate":
|
||||
fragment = Fragment.get(design, platform)
|
||||
sim = Simulator(fragment)
|
||||
sim.add_clock(args.sync_period)
|
||||
with sim.write_vcd(vcd_file=args.vcd_file, gtkw_file=args.gtkw_file, traces=ports):
|
||||
sim.run_until(args.sync_period * args.sync_clocks, run_passive=True)
|
||||
|
||||
|
||||
def main(*args, **kwargs):
|
||||
parser = main_parser()
|
||||
main_runner(parser, parser.parse_args(), *args, **kwargs)
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.cli, use amaranth.cli",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,11 +1,6 @@
|
|||
from .fhdl.structure import *
|
||||
from .fhdl.module import *
|
||||
from .fhdl.specials import *
|
||||
from .fhdl.bitcontainer import *
|
||||
from .fhdl.decorators import *
|
||||
# from .fhdl.simplify import *
|
||||
from amaranth.compat import *
|
||||
|
||||
from .sim import *
|
||||
|
||||
from .genlib.record import *
|
||||
from .genlib.fsm import *
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.compat, use amaranth.compat",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,6 @@
|
|||
from amaranth.compat.fhdl import *
|
||||
|
||||
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.compat.fhdl, use amaranth.compat.fhdl",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
@ -1,21 +1,7 @@
|
|||
from ... import utils
|
||||
from ...hdl import ast
|
||||
from ..._utils import deprecated
|
||||
from amaranth.compat.fhdl.bitcontainer import *
|
||||
from amaranth.compat.fhdl.bitcontainer import __all__
|
||||
|
||||
|
||||
__all__ = ["log2_int", "bits_for", "value_bits_sign"]
|
||||
|
||||
|
||||
@deprecated("instead of `log2_int`, use `nmigen.utils.log2_int`")
|
||||
def log2_int(n, need_pow2=True):
|
||||
return utils.log2_int(n, need_pow2)
|
||||
|
||||
|
||||
@deprecated("instead of `bits_for`, use `nmigen.utils.bits_for`")
|
||||
def bits_for(n, require_sign_bit=False):
|
||||
return utils.bits_for(n, require_sign_bit)
|
||||
|
||||
|
||||
@deprecated("instead of `value_bits_sign(v)`, use `v.shape()`")
|
||||
def value_bits_sign(v):
|
||||
return tuple(ast.Value.cast(v).shape())
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.compat.fhdl.bitcontainer, use amaranth.compat.fhdl.bitcontainer",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,35 +1,6 @@
|
|||
from operator import itemgetter
|
||||
from amaranth.compat.fhdl.conv_output import *
|
||||
|
||||
|
||||
class ConvOutput:
|
||||
def __init__(self):
|
||||
self.main_source = ""
|
||||
self.data_files = dict()
|
||||
|
||||
def set_main_source(self, src):
|
||||
self.main_source = src
|
||||
|
||||
def add_data_file(self, filename_base, content):
|
||||
filename = filename_base
|
||||
i = 1
|
||||
while filename in self.data_files:
|
||||
parts = filename_base.split(".", maxsplit=1)
|
||||
parts[0] += "_" + str(i)
|
||||
filename = ".".join(parts)
|
||||
i += 1
|
||||
self.data_files[filename] = content
|
||||
return filename
|
||||
|
||||
def __str__(self):
|
||||
r = self.main_source + "\n"
|
||||
for filename, content in sorted(self.data_files.items(),
|
||||
key=itemgetter(0)):
|
||||
r += filename + ":\n" + content
|
||||
return r
|
||||
|
||||
def write(self, main_filename):
|
||||
with open(main_filename, "w") as f:
|
||||
f.write(self.main_source)
|
||||
for filename, content in self.data_files.items():
|
||||
with open(filename, "w") as f:
|
||||
f.write(content)
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.compat.fhdl.conv_output, use amaranth.compat.fhdl.conv_output",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,55 +1,7 @@
|
|||
from ...hdl.ast import *
|
||||
from ...hdl.xfrm import ResetInserter as NativeResetInserter
|
||||
from ...hdl.xfrm import EnableInserter as NativeEnableInserter
|
||||
from ...hdl.xfrm import DomainRenamer as NativeDomainRenamer
|
||||
from ..._utils import deprecated
|
||||
from amaranth.compat.fhdl.decorators import *
|
||||
from amaranth.compat.fhdl.decorators import __all__
|
||||
|
||||
|
||||
__all__ = ["ResetInserter", "CEInserter", "ClockDomainsRenamer"]
|
||||
|
||||
|
||||
class _CompatControlInserter:
|
||||
_control_name = None
|
||||
_native_inserter = None
|
||||
|
||||
def __init__(self, clock_domains=None):
|
||||
self.clock_domains = clock_domains
|
||||
|
||||
def __call__(self, module):
|
||||
if self.clock_domains is None:
|
||||
signals = {self._control_name: ("sync", Signal(name=self._control_name))}
|
||||
else:
|
||||
def name(cd):
|
||||
return self._control_name + "_" + cd
|
||||
signals = {name(cd): (cd, Signal(name=name(cd))) for cd in self.clock_domains}
|
||||
for name, (cd, signal) in signals.items():
|
||||
setattr(module, name, signal)
|
||||
return self._native_inserter(dict(signals.values()))(module)
|
||||
|
||||
|
||||
@deprecated("instead of `migen.fhdl.decorators.ResetInserter`, "
|
||||
"use `nmigen.hdl.xfrm.ResetInserter`; note that nMigen ResetInserter accepts "
|
||||
"a dict of reset signals (or a single reset signal) as an argument, not "
|
||||
"a set of clock domain names (or a single clock domain name)")
|
||||
class CompatResetInserter(_CompatControlInserter):
|
||||
_control_name = "reset"
|
||||
_native_inserter = NativeResetInserter
|
||||
|
||||
|
||||
@deprecated("instead of `migen.fhdl.decorators.CEInserter`, "
|
||||
"use `nmigen.hdl.xfrm.EnableInserter`; note that nMigen EnableInserter accepts "
|
||||
"a dict of enable signals (or a single enable signal) as an argument, not "
|
||||
"a set of clock domain names (or a single clock domain name)")
|
||||
class CompatCEInserter(_CompatControlInserter):
|
||||
_control_name = "ce"
|
||||
_native_inserter = NativeEnableInserter
|
||||
|
||||
|
||||
class CompatClockDomainsRenamer(NativeDomainRenamer):
|
||||
def __init__(self, cd_remapping):
|
||||
super().__init__(cd_remapping)
|
||||
|
||||
|
||||
ResetInserter = CompatResetInserter
|
||||
CEInserter = CompatCEInserter
|
||||
ClockDomainsRenamer = CompatClockDomainsRenamer
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.compat.fhdl.decorators, use amaranth.compat.fhdl.decorators",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,163 +1,7 @@
|
|||
from collections.abc import Iterable
|
||||
|
||||
from ..._utils import flatten, deprecated
|
||||
from ...hdl import dsl, ir
|
||||
from amaranth.compat.fhdl.module import *
|
||||
from amaranth.compat.fhdl.module import __all__
|
||||
|
||||
|
||||
__all__ = ["Module", "FinalizeError"]
|
||||
|
||||
|
||||
def _flat_list(e):
|
||||
if isinstance(e, Iterable):
|
||||
return list(flatten(e))
|
||||
else:
|
||||
return [e]
|
||||
|
||||
|
||||
class CompatFinalizeError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
FinalizeError = CompatFinalizeError
|
||||
|
||||
|
||||
class _CompatModuleProxy:
|
||||
def __init__(self, cm):
|
||||
object.__setattr__(self, "_cm", cm)
|
||||
|
||||
|
||||
class _CompatModuleComb(_CompatModuleProxy):
|
||||
@deprecated("instead of `self.comb +=`, use `m.d.comb +=`")
|
||||
def __iadd__(self, assigns):
|
||||
self._cm._module._add_statement(assigns, domain=None, depth=0, compat_mode=True)
|
||||
return self
|
||||
|
||||
|
||||
class _CompatModuleSyncCD:
|
||||
def __init__(self, cm, cd):
|
||||
self._cm = cm
|
||||
self._cd = cd
|
||||
|
||||
@deprecated("instead of `self.sync.<domain> +=`, use `m.d.<domain> +=`")
|
||||
def __iadd__(self, assigns):
|
||||
self._cm._module._add_statement(assigns, domain=self._cd, depth=0, compat_mode=True)
|
||||
return self
|
||||
|
||||
|
||||
class _CompatModuleSync(_CompatModuleProxy):
|
||||
@deprecated("instead of `self.sync +=`, use `m.d.sync +=`")
|
||||
def __iadd__(self, assigns):
|
||||
self._cm._module._add_statement(assigns, domain="sync", depth=0, compat_mode=True)
|
||||
return self
|
||||
|
||||
def __getattr__(self, name):
|
||||
return _CompatModuleSyncCD(self._cm, name)
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
if not isinstance(value, _CompatModuleSyncCD):
|
||||
raise AttributeError("Attempted to assign sync property - use += instead")
|
||||
|
||||
|
||||
class _CompatModuleSpecials(_CompatModuleProxy):
|
||||
@deprecated("instead of `self.specials.<name> =`, use `m.submodules.<name> =`")
|
||||
def __setattr__(self, name, value):
|
||||
self._cm._submodules.append((name, value))
|
||||
setattr(self._cm, name, value)
|
||||
|
||||
@deprecated("instead of `self.specials +=`, use `m.submodules +=`")
|
||||
def __iadd__(self, other):
|
||||
self._cm._submodules += [(None, e) for e in _flat_list(other)]
|
||||
return self
|
||||
|
||||
|
||||
class _CompatModuleSubmodules(_CompatModuleProxy):
|
||||
@deprecated("instead of `self.submodules.<name> =`, use `m.submodules.<name> =`")
|
||||
def __setattr__(self, name, value):
|
||||
self._cm._submodules.append((name, value))
|
||||
setattr(self._cm, name, value)
|
||||
|
||||
@deprecated("instead of `self.submodules +=`, use `m.submodules +=`")
|
||||
def __iadd__(self, other):
|
||||
self._cm._submodules += [(None, e) for e in _flat_list(other)]
|
||||
return self
|
||||
|
||||
|
||||
class _CompatModuleClockDomains(_CompatModuleProxy):
|
||||
@deprecated("instead of `self.clock_domains.<name> =`, use `m.domains.<name> =`")
|
||||
def __setattr__(self, name, value):
|
||||
self.__iadd__(value)
|
||||
setattr(self._cm, name, value)
|
||||
|
||||
@deprecated("instead of `self.clock_domains +=`, use `m.domains +=`")
|
||||
def __iadd__(self, other):
|
||||
self._cm._module.domains += _flat_list(other)
|
||||
return self
|
||||
|
||||
|
||||
class CompatModule(ir.Elaboratable):
|
||||
_MustUse__silence = True
|
||||
|
||||
# Actually returns another nMigen Elaboratable (nmigen.dsl.Module), not a Fragment.
|
||||
def get_fragment(self):
|
||||
assert not self.get_fragment_called
|
||||
self.get_fragment_called = True
|
||||
self.finalize()
|
||||
return self._module
|
||||
|
||||
def elaborate(self, platform):
|
||||
if not self.get_fragment_called:
|
||||
self.get_fragment()
|
||||
return self._module
|
||||
|
||||
def __getattr__(self, name):
|
||||
if name == "comb":
|
||||
return _CompatModuleComb(self)
|
||||
elif name == "sync":
|
||||
return _CompatModuleSync(self)
|
||||
elif name == "specials":
|
||||
return _CompatModuleSpecials(self)
|
||||
elif name == "submodules":
|
||||
return _CompatModuleSubmodules(self)
|
||||
elif name == "clock_domains":
|
||||
return _CompatModuleClockDomains(self)
|
||||
elif name == "finalized":
|
||||
self.finalized = False
|
||||
return self.finalized
|
||||
elif name == "_module":
|
||||
self._module = dsl.Module()
|
||||
return self._module
|
||||
elif name == "_submodules":
|
||||
self._submodules = []
|
||||
return self._submodules
|
||||
elif name == "_clock_domains":
|
||||
self._clock_domains = []
|
||||
return self._clock_domains
|
||||
elif name == "get_fragment_called":
|
||||
self.get_fragment_called = False
|
||||
return self.get_fragment_called
|
||||
else:
|
||||
raise AttributeError("'{}' object has no attribute '{}'"
|
||||
.format(type(self).__name__, name))
|
||||
|
||||
def finalize(self, *args, **kwargs):
|
||||
def finalize_submodules():
|
||||
for name, submodule in self._submodules:
|
||||
if not hasattr(submodule, "finalize"):
|
||||
continue
|
||||
if submodule.finalized:
|
||||
continue
|
||||
submodule.finalize(*args, **kwargs)
|
||||
|
||||
if not self.finalized:
|
||||
self.finalized = True
|
||||
finalize_submodules()
|
||||
self.do_finalize(*args, **kwargs)
|
||||
finalize_submodules()
|
||||
for name, submodule in self._submodules:
|
||||
self._module._add_submodule(submodule, name)
|
||||
|
||||
def do_finalize(self):
|
||||
pass
|
||||
|
||||
|
||||
Module = CompatModule
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.compat.fhdl.module, use amaranth.compat.fhdl.module",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,145 +1,7 @@
|
|||
from amaranth.compat.fhdl.specials import *
|
||||
from amaranth.compat.fhdl.specials import __all__
|
||||
|
||||
|
||||
import warnings
|
||||
|
||||
from ..._utils import deprecated, extend
|
||||
from ...hdl.ast import *
|
||||
from ...hdl.ir import Elaboratable
|
||||
from ...hdl.mem import Memory as NativeMemory
|
||||
from ...hdl.ir import Fragment, Instance
|
||||
from ...hdl.dsl import Module
|
||||
from .module import Module as CompatModule
|
||||
from .structure import Signal
|
||||
from ...lib.io import Pin
|
||||
|
||||
|
||||
__all__ = ["TSTriple", "Instance", "Memory", "READ_FIRST", "WRITE_FIRST", "NO_CHANGE"]
|
||||
|
||||
|
||||
class TSTriple:
|
||||
def __init__(self, bits_sign=None, min=None, max=None, reset_o=0, reset_oe=0, reset_i=0,
|
||||
name=None):
|
||||
self.o = Signal(bits_sign, min=min, max=max, reset=reset_o,
|
||||
name=None if name is None else name + "_o")
|
||||
self.oe = Signal(reset=reset_oe,
|
||||
name=None if name is None else name + "_oe")
|
||||
self.i = Signal(bits_sign, min=min, max=max, reset=reset_i,
|
||||
name=None if name is None else name + "_i")
|
||||
|
||||
def __len__(self):
|
||||
return len(self.o)
|
||||
|
||||
def get_tristate(self, io):
|
||||
return Tristate(io, self.o, self.oe, self.i)
|
||||
|
||||
|
||||
class Tristate(Elaboratable):
|
||||
def __init__(self, target, o, oe, i=None):
|
||||
self.target = target
|
||||
self.o = o
|
||||
self.oe = oe
|
||||
self.i = i if i is not None else None
|
||||
|
||||
def elaborate(self, platform):
|
||||
if self.i is None:
|
||||
pin = Pin(len(self.target), dir="oe")
|
||||
pin.o = self.o
|
||||
pin.oe = self.oe
|
||||
return platform.get_tristate(pin, self.target, attrs={}, invert=None)
|
||||
else:
|
||||
pin = Pin(len(self.target), dir="io")
|
||||
pin.o = self.o
|
||||
pin.oe = self.oe
|
||||
pin.i = self.i
|
||||
return platform.get_input_output(pin, self.target, attrs={}, invert=None)
|
||||
|
||||
m = Module()
|
||||
if self.i is not None:
|
||||
m.d.comb += self.i.eq(self.target)
|
||||
m.submodules += Instance("$tribuf",
|
||||
p_WIDTH=len(self.target),
|
||||
i_EN=self.oe,
|
||||
i_A=self.o,
|
||||
o_Y=self.target,
|
||||
)
|
||||
|
||||
f = m.elaborate(platform)
|
||||
f.flatten = True
|
||||
return f
|
||||
|
||||
|
||||
(READ_FIRST, WRITE_FIRST, NO_CHANGE) = range(3)
|
||||
|
||||
|
||||
class _MemoryPort(CompatModule):
|
||||
def __init__(self, adr, dat_r, we=None, dat_w=None, async_read=False, re=None,
|
||||
we_granularity=0, mode=WRITE_FIRST, clock_domain="sync"):
|
||||
self.adr = adr
|
||||
self.dat_r = dat_r
|
||||
self.we = we
|
||||
self.dat_w = dat_w
|
||||
self.async_read = async_read
|
||||
self.re = re
|
||||
self.we_granularity = we_granularity
|
||||
self.mode = mode
|
||||
self.clock = ClockSignal(clock_domain)
|
||||
|
||||
|
||||
@extend(NativeMemory)
|
||||
@deprecated("it is not necessary or permitted to add Memory as a special or submodule")
|
||||
def elaborate(self, platform):
|
||||
return Fragment()
|
||||
|
||||
|
||||
class CompatMemory(NativeMemory, Elaboratable):
|
||||
def __init__(self, width, depth, init=None, name=None):
|
||||
super().__init__(width=width, depth=depth, init=init, name=name)
|
||||
|
||||
@deprecated("instead of `get_port()`, use `read_port()` and `write_port()`")
|
||||
def get_port(self, write_capable=False, async_read=False, has_re=False, we_granularity=0,
|
||||
mode=WRITE_FIRST, clock_domain="sync"):
|
||||
if we_granularity >= self.width:
|
||||
warnings.warn("do not specify `we_granularity` greater than memory width, as it "
|
||||
"is a hard error in non-compatibility mode",
|
||||
DeprecationWarning, stacklevel=1)
|
||||
we_granularity = 0
|
||||
if we_granularity == 0:
|
||||
warnings.warn("instead of `we_granularity=0`, use `we_granularity=None` or avoid "
|
||||
"specifying it at all, as it is a hard error in non-compatibility mode",
|
||||
DeprecationWarning, stacklevel=1)
|
||||
we_granularity = None
|
||||
assert mode != NO_CHANGE
|
||||
rdport = self.read_port(domain="comb" if async_read else clock_domain,
|
||||
transparent=mode == WRITE_FIRST)
|
||||
rdport.addr.name = "{}_addr".format(self.name)
|
||||
adr = rdport.addr
|
||||
dat_r = rdport.data
|
||||
if write_capable:
|
||||
wrport = self.write_port(domain=clock_domain, granularity=we_granularity)
|
||||
wrport.addr = rdport.addr
|
||||
we = wrport.en
|
||||
dat_w = wrport.data
|
||||
else:
|
||||
we = None
|
||||
dat_w = None
|
||||
if has_re:
|
||||
if mode == READ_FIRST:
|
||||
re = rdport.en
|
||||
else:
|
||||
warnings.warn("the combination of `has_re=True` and `mode=WRITE_FIRST` has "
|
||||
"surprising behavior: keeping `re` low would merely latch "
|
||||
"the address, while the data will change with changing memory "
|
||||
"contents; avoid using `re` with transparent ports as it is a hard "
|
||||
"error in non-compatibility mode",
|
||||
DeprecationWarning, stacklevel=1)
|
||||
re = Signal()
|
||||
else:
|
||||
re = None
|
||||
mp = _MemoryPort(adr, dat_r, we, dat_w,
|
||||
async_read, re, we_granularity, mode,
|
||||
clock_domain)
|
||||
mp.submodules.rdport = rdport
|
||||
if write_capable:
|
||||
mp.submodules.wrport = wrport
|
||||
return mp
|
||||
|
||||
|
||||
Memory = CompatMemory
|
||||
warnings.warn("instead of nmigen.compat.fhdl.specials, use amaranth.compat.fhdl.specials",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,185 +1,7 @@
|
|||
import builtins
|
||||
from amaranth.compat.fhdl.structure import *
|
||||
from amaranth.compat.fhdl.structure import __all__
|
||||
|
||||
|
||||
import warnings
|
||||
from collections import OrderedDict
|
||||
|
||||
from ...utils import bits_for
|
||||
from ..._utils import deprecated, extend
|
||||
from ...hdl import ast
|
||||
from ...hdl.ast import (DUID,
|
||||
Shape, signed, unsigned,
|
||||
Value, Const, C, Mux, Slice as _Slice, Part, Cat, Repl,
|
||||
Signal as NativeSignal,
|
||||
ClockSignal, ResetSignal,
|
||||
Array, ArrayProxy as _ArrayProxy)
|
||||
from ...hdl.cd import ClockDomain
|
||||
|
||||
|
||||
__all__ = ["DUID", "wrap", "Mux", "Cat", "Replicate", "Constant", "C", "Signal", "ClockSignal",
|
||||
"ResetSignal", "If", "Case", "Array", "ClockDomain"]
|
||||
|
||||
|
||||
@deprecated("instead of `wrap`, use `Value.cast`")
|
||||
def wrap(v):
|
||||
return Value.cast(v)
|
||||
|
||||
|
||||
class CompatSignal(NativeSignal):
|
||||
def __init__(self, bits_sign=None, name=None, variable=False, reset=0,
|
||||
reset_less=False, name_override=None, min=None, max=None,
|
||||
related=None, attr=None, src_loc_at=0, **kwargs):
|
||||
if min is not None or max is not None:
|
||||
warnings.warn("instead of `Signal(min={min}, max={max})`, "
|
||||
"use `Signal(range({min}, {max}))`"
|
||||
.format(min=min or 0, max=max or 2),
|
||||
DeprecationWarning, stacklevel=2 + src_loc_at)
|
||||
|
||||
if bits_sign is None:
|
||||
if min is None:
|
||||
min = 0
|
||||
if max is None:
|
||||
max = 2
|
||||
max -= 1 # make both bounds inclusive
|
||||
if min > max:
|
||||
raise ValueError("Lower bound {} should be less or equal to higher bound {}"
|
||||
.format(min, max + 1))
|
||||
sign = min < 0 or max < 0
|
||||
if min == max:
|
||||
bits = 0
|
||||
else:
|
||||
bits = builtins.max(bits_for(min, sign), bits_for(max, sign))
|
||||
shape = signed(bits) if sign else unsigned(bits)
|
||||
else:
|
||||
if not (min is None and max is None):
|
||||
raise ValueError("Only one of bits/signedness or bounds may be specified")
|
||||
shape = bits_sign
|
||||
|
||||
super().__init__(shape=shape, name=name_override or name,
|
||||
reset=reset, reset_less=reset_less,
|
||||
attrs=attr, src_loc_at=1 + src_loc_at, **kwargs)
|
||||
|
||||
|
||||
Signal = CompatSignal
|
||||
|
||||
|
||||
@deprecated("instead of `Constant`, use `Const`")
|
||||
def Constant(value, bits_sign=None):
|
||||
return Const(value, bits_sign)
|
||||
|
||||
|
||||
@deprecated("instead of `Replicate`, use `Repl`")
|
||||
def Replicate(v, n):
|
||||
return Repl(v, n)
|
||||
|
||||
|
||||
@extend(Const)
|
||||
@property
|
||||
@deprecated("instead of `.nbits`, use `.width`")
|
||||
def nbits(self):
|
||||
return self.width
|
||||
|
||||
|
||||
@extend(NativeSignal)
|
||||
@property
|
||||
@deprecated("instead of `.nbits`, use `.width`")
|
||||
def nbits(self):
|
||||
return self.width
|
||||
|
||||
|
||||
@extend(NativeSignal)
|
||||
@NativeSignal.nbits.setter
|
||||
@deprecated("instead of `.nbits = x`, use `.width = x`")
|
||||
def nbits(self, value):
|
||||
self.width = value
|
||||
|
||||
|
||||
@extend(NativeSignal)
|
||||
@deprecated("instead of `.part`, use `.bit_select`")
|
||||
def part(self, offset, width):
|
||||
return Part(self, offset, width, src_loc_at=2)
|
||||
|
||||
|
||||
@extend(Cat)
|
||||
@property
|
||||
@deprecated("instead of `.l`, use `.parts`")
|
||||
def l(self):
|
||||
return self.parts
|
||||
|
||||
|
||||
@extend(ast.Operator)
|
||||
@property
|
||||
@deprecated("instead of `.op`, use `.operator`")
|
||||
def op(self):
|
||||
return self.operator
|
||||
|
||||
|
||||
@extend(_ArrayProxy)
|
||||
@property
|
||||
@deprecated("instead `_ArrayProxy.choices`, use `ArrayProxy.elems`")
|
||||
def choices(self):
|
||||
return self.elems
|
||||
|
||||
|
||||
class If(ast.Switch):
|
||||
@deprecated("instead of `If(cond, ...)`, use `with m.If(cond): ...`")
|
||||
def __init__(self, cond, *stmts):
|
||||
cond = Value.cast(cond)
|
||||
if len(cond) != 1:
|
||||
cond = cond.bool()
|
||||
super().__init__(cond, {("1",): ast.Statement.cast(stmts)})
|
||||
|
||||
@deprecated("instead of `.Elif(cond, ...)`, use `with m.Elif(cond): ...`")
|
||||
def Elif(self, cond, *stmts):
|
||||
cond = Value.cast(cond)
|
||||
if len(cond) != 1:
|
||||
cond = cond.bool()
|
||||
self.cases = OrderedDict((("-" + k,), v) for (k,), v in self.cases.items())
|
||||
self.cases[("1" + "-" * len(self.test),)] = ast.Statement.cast(stmts)
|
||||
self.test = Cat(self.test, cond)
|
||||
return self
|
||||
|
||||
@deprecated("instead of `.Else(...)`, use `with m.Else(): ...`")
|
||||
def Else(self, *stmts):
|
||||
self.cases[()] = ast.Statement.cast(stmts)
|
||||
return self
|
||||
|
||||
|
||||
class Case(ast.Switch):
|
||||
@deprecated("instead of `Case(test, { value: stmts })`, use `with m.Switch(test):` and "
|
||||
"`with m.Case(value): stmts`; instead of `\"default\": stmts`, use "
|
||||
"`with m.Case(): stmts`")
|
||||
def __init__(self, test, cases):
|
||||
new_cases = []
|
||||
default = None
|
||||
for k, v in cases.items():
|
||||
if isinstance(k, (bool, int)):
|
||||
k = Const(k)
|
||||
if (not isinstance(k, Const)
|
||||
and not (isinstance(k, str) and k == "default")):
|
||||
raise TypeError("Case object is not a Migen constant")
|
||||
if isinstance(k, str) and k == "default":
|
||||
default = v
|
||||
continue
|
||||
else:
|
||||
k = k.value
|
||||
new_cases.append((k, v))
|
||||
if default is not None:
|
||||
new_cases.append((None, default))
|
||||
super().__init__(test, OrderedDict(new_cases))
|
||||
|
||||
@deprecated("instead of `Case(...).makedefault()`, use an explicit default case: "
|
||||
"`with m.Case(): ...`")
|
||||
def makedefault(self, key=None):
|
||||
if key is None:
|
||||
for choice in self.cases.keys():
|
||||
if (key is None
|
||||
or (isinstance(choice, str) and choice == "default")
|
||||
or choice > key):
|
||||
key = choice
|
||||
elif isinstance(key, str) and key == "default":
|
||||
key = ()
|
||||
else:
|
||||
key = ("{:0{}b}".format(ast.Value.cast(key).value, len(self.test)),)
|
||||
stmts = self.cases[key]
|
||||
del self.cases[key]
|
||||
self.cases[()] = stmts
|
||||
return self
|
||||
warnings.warn("instead of nmigen.compat.fhdl.structure, use amaranth.compat.fhdl.structure",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,35 +1,6 @@
|
|||
from amaranth.compat.fhdl.verilog import *
|
||||
|
||||
|
||||
import warnings
|
||||
|
||||
from ...hdl.ir import Fragment
|
||||
from ...hdl.cd import ClockDomain
|
||||
from ...back import verilog
|
||||
from .conv_output import ConvOutput
|
||||
from .module import Module
|
||||
|
||||
|
||||
def convert(fi, ios=None, name="top", special_overrides=dict(),
|
||||
attr_translate=None, create_clock_domains=True,
|
||||
display_run=False):
|
||||
if display_run:
|
||||
warnings.warn("`display_run=True` support has been removed",
|
||||
DeprecationWarning, stacklevel=1)
|
||||
if special_overrides:
|
||||
warnings.warn("`special_overrides` support as well as `Special` has been removed",
|
||||
DeprecationWarning, stacklevel=1)
|
||||
# TODO: attr_translate
|
||||
|
||||
if isinstance(fi, Module):
|
||||
fi = fi.get_fragment()
|
||||
|
||||
def missing_domain(name):
|
||||
if create_clock_domains:
|
||||
return ClockDomain(name)
|
||||
v_output = verilog.convert(
|
||||
elaboratable=fi,
|
||||
name=name,
|
||||
ports=ios or (),
|
||||
missing_domain=missing_domain
|
||||
)
|
||||
output = ConvOutput()
|
||||
output.set_main_source(v_output)
|
||||
return output
|
||||
warnings.warn("instead of nmigen.compat.fhdl.verilog, use amaranth.compat.fhdl.verilog",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,6 @@
|
|||
from amaranth.compat.genlib import *
|
||||
|
||||
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.compat.genlib, use amaranth.compat.genlib",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
@ -1,74 +1,7 @@
|
|||
from amaranth.compat.genlib.cdc import *
|
||||
from amaranth.compat.genlib.cdc import __all__
|
||||
|
||||
|
||||
import warnings
|
||||
|
||||
from ..._utils import deprecated
|
||||
from ...lib.cdc import FFSynchronizer as NativeFFSynchronizer
|
||||
from ...lib.cdc import PulseSynchronizer as NativePulseSynchronizer
|
||||
from ...hdl.ast import *
|
||||
from ..fhdl.module import CompatModule
|
||||
from ..fhdl.structure import If
|
||||
|
||||
|
||||
__all__ = ["MultiReg", "PulseSynchronizer", "GrayCounter", "GrayDecoder"]
|
||||
|
||||
|
||||
class MultiReg(NativeFFSynchronizer):
|
||||
def __init__(self, i, o, odomain="sync", n=2, reset=0):
|
||||
old_opts = []
|
||||
new_opts = []
|
||||
if odomain != "sync":
|
||||
old_opts.append(", odomain={!r}".format(odomain))
|
||||
new_opts.append(", o_domain={!r}".format(odomain))
|
||||
if n != 2:
|
||||
old_opts.append(", n={!r}".format(n))
|
||||
new_opts.append(", stages={!r}".format(n))
|
||||
warnings.warn("instead of `MultiReg(...{})`, use `FFSynchronizer(...{})`"
|
||||
.format("".join(old_opts), "".join(new_opts)),
|
||||
DeprecationWarning, stacklevel=2)
|
||||
super().__init__(i, o, o_domain=odomain, stages=n, reset=reset)
|
||||
self.odomain = odomain
|
||||
|
||||
|
||||
@deprecated("instead of `migen.genlib.cdc.PulseSynchronizer`, use `nmigen.lib.cdc.PulseSynchronizer`")
|
||||
class PulseSynchronizer(NativePulseSynchronizer):
|
||||
def __init__(self, idomain, odomain):
|
||||
super().__init__(i_domain=idomain, o_domain=odomain)
|
||||
|
||||
|
||||
@deprecated("instead of `migen.genlib.cdc.GrayCounter`, use `nmigen.lib.coding.GrayEncoder`")
|
||||
class GrayCounter(CompatModule):
|
||||
def __init__(self, width):
|
||||
self.ce = Signal()
|
||||
self.q = Signal(width)
|
||||
self.q_next = Signal(width)
|
||||
self.q_binary = Signal(width)
|
||||
self.q_next_binary = Signal(width)
|
||||
|
||||
###
|
||||
|
||||
self.comb += [
|
||||
If(self.ce,
|
||||
self.q_next_binary.eq(self.q_binary + 1)
|
||||
).Else(
|
||||
self.q_next_binary.eq(self.q_binary)
|
||||
),
|
||||
self.q_next.eq(self.q_next_binary ^ self.q_next_binary[1:])
|
||||
]
|
||||
self.sync += [
|
||||
self.q_binary.eq(self.q_next_binary),
|
||||
self.q.eq(self.q_next)
|
||||
]
|
||||
|
||||
|
||||
@deprecated("instead of `migen.genlib.cdc.GrayDecoder`, use `nmigen.lib.coding.GrayDecoder`")
|
||||
class GrayDecoder(CompatModule):
|
||||
def __init__(self, width):
|
||||
self.i = Signal(width)
|
||||
self.o = Signal(width, reset_less=True)
|
||||
|
||||
# # #
|
||||
|
||||
o_comb = Signal(width)
|
||||
self.comb += o_comb[-1].eq(self.i[-1])
|
||||
for i in reversed(range(width-1)):
|
||||
self.comb += o_comb[i].eq(o_comb[i+1] ^ self.i[i])
|
||||
self.sync += self.o.eq(o_comb)
|
||||
warnings.warn("instead of nmigen.compat.genlib.cdc, use amaranth.compat.genlib.cdc",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,4 +1,7 @@
|
|||
from ...lib.coding import *
|
||||
from amaranth.compat.genlib.coding import *
|
||||
from amaranth.compat.genlib.coding import __all__
|
||||
|
||||
|
||||
__all__ = ["Encoder", "PriorityEncoder", "Decoder", "PriorityDecoder"]
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.compat.genlib.coding, use amaranth.compat.genlib.coding",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,147 +1,7 @@
|
|||
from ..._utils import deprecated, extend
|
||||
from ...lib.fifo import (FIFOInterface as NativeFIFOInterface,
|
||||
SyncFIFO as NativeSyncFIFO, SyncFIFOBuffered as NativeSyncFIFOBuffered,
|
||||
AsyncFIFO as NativeAsyncFIFO, AsyncFIFOBuffered as NativeAsyncFIFOBuffered)
|
||||
from amaranth.compat.genlib.fifo import *
|
||||
from amaranth.compat.genlib.fifo import __all__
|
||||
|
||||
|
||||
__all__ = ["_FIFOInterface", "SyncFIFO", "SyncFIFOBuffered", "AsyncFIFO", "AsyncFIFOBuffered"]
|
||||
|
||||
|
||||
class CompatFIFOInterface(NativeFIFOInterface):
|
||||
@deprecated("attribute `fwft` must be provided to FIFOInterface constructor")
|
||||
def __init__(self, width, depth):
|
||||
super().__init__(width=width, depth=depth, fwft=False)
|
||||
del self.fwft
|
||||
|
||||
|
||||
@extend(NativeFIFOInterface)
|
||||
@property
|
||||
@deprecated("instead of `fifo.din`, use `fifo.w_data`")
|
||||
def din(self):
|
||||
return self.w_data
|
||||
|
||||
|
||||
@extend(NativeFIFOInterface)
|
||||
@NativeFIFOInterface.din.setter
|
||||
@deprecated("instead of `fifo.din = x`, use `fifo.w_data = x`")
|
||||
def din(self, w_data):
|
||||
self.w_data = w_data
|
||||
|
||||
|
||||
@extend(NativeFIFOInterface)
|
||||
@property
|
||||
@deprecated("instead of `fifo.writable`, use `fifo.w_rdy`")
|
||||
def writable(self):
|
||||
return self.w_rdy
|
||||
|
||||
|
||||
@extend(NativeFIFOInterface)
|
||||
@NativeFIFOInterface.writable.setter
|
||||
@deprecated("instead of `fifo.writable = x`, use `fifo.w_rdy = x`")
|
||||
def writable(self, w_rdy):
|
||||
self.w_rdy = w_rdy
|
||||
|
||||
|
||||
@extend(NativeFIFOInterface)
|
||||
@property
|
||||
@deprecated("instead of `fifo.we`, use `fifo.w_en`")
|
||||
def we(self):
|
||||
return self.w_en
|
||||
|
||||
|
||||
@extend(NativeFIFOInterface)
|
||||
@NativeFIFOInterface.we.setter
|
||||
@deprecated("instead of `fifo.we = x`, use `fifo.w_en = x`")
|
||||
def we(self, w_en):
|
||||
self.w_en = w_en
|
||||
|
||||
|
||||
@extend(NativeFIFOInterface)
|
||||
@property
|
||||
@deprecated("instead of `fifo.dout`, use `fifo.r_data`")
|
||||
def dout(self):
|
||||
return self.r_data
|
||||
|
||||
|
||||
@extend(NativeFIFOInterface)
|
||||
@NativeFIFOInterface.dout.setter
|
||||
@deprecated("instead of `fifo.dout = x`, use `fifo.r_data = x`")
|
||||
def dout(self, r_data):
|
||||
self.r_data = r_data
|
||||
|
||||
|
||||
@extend(NativeFIFOInterface)
|
||||
@property
|
||||
@deprecated("instead of `fifo.readable`, use `fifo.r_rdy`")
|
||||
def readable(self):
|
||||
return self.r_rdy
|
||||
|
||||
|
||||
@extend(NativeFIFOInterface)
|
||||
@NativeFIFOInterface.readable.setter
|
||||
@deprecated("instead of `fifo.readable = x`, use `fifo.r_rdy = x`")
|
||||
def readable(self, r_rdy):
|
||||
self.r_rdy = r_rdy
|
||||
|
||||
|
||||
@extend(NativeFIFOInterface)
|
||||
@property
|
||||
@deprecated("instead of `fifo.re`, use `fifo.r_en`")
|
||||
def re(self):
|
||||
return self.r_en
|
||||
|
||||
|
||||
@extend(NativeFIFOInterface)
|
||||
@NativeFIFOInterface.re.setter
|
||||
@deprecated("instead of `fifo.re = x`, use `fifo.r_en = x`")
|
||||
def re(self, r_en):
|
||||
self.r_en = r_en
|
||||
|
||||
|
||||
@extend(NativeFIFOInterface)
|
||||
def read(self):
|
||||
"""Read method for simulation."""
|
||||
assert (yield self.r_rdy)
|
||||
value = (yield self.r_data)
|
||||
yield self.r_en.eq(1)
|
||||
yield
|
||||
yield self.r_en.eq(0)
|
||||
yield
|
||||
return value
|
||||
|
||||
@extend(NativeFIFOInterface)
|
||||
def write(self, data):
|
||||
"""Write method for simulation."""
|
||||
assert (yield self.w_rdy)
|
||||
yield self.w_data.eq(data)
|
||||
yield self.w_en.eq(1)
|
||||
yield
|
||||
yield self.w_en.eq(0)
|
||||
yield
|
||||
|
||||
|
||||
class CompatSyncFIFO(NativeSyncFIFO):
|
||||
def __init__(self, width, depth, fwft=True):
|
||||
super().__init__(width=width, depth=depth, fwft=fwft)
|
||||
|
||||
|
||||
class CompatSyncFIFOBuffered(NativeSyncFIFOBuffered):
|
||||
def __init__(self, width, depth):
|
||||
super().__init__(width=width, depth=depth)
|
||||
|
||||
|
||||
class CompatAsyncFIFO(NativeAsyncFIFO):
|
||||
def __init__(self, width, depth):
|
||||
super().__init__(width=width, depth=depth)
|
||||
|
||||
|
||||
class CompatAsyncFIFOBuffered(NativeAsyncFIFOBuffered):
|
||||
def __init__(self, width, depth):
|
||||
super().__init__(width=width, depth=depth)
|
||||
|
||||
|
||||
_FIFOInterface = CompatFIFOInterface
|
||||
SyncFIFO = CompatSyncFIFO
|
||||
SyncFIFOBuffered = CompatSyncFIFOBuffered
|
||||
AsyncFIFO = CompatAsyncFIFO
|
||||
AsyncFIFOBuffered = CompatAsyncFIFOBuffered
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.compat.genlib.fifo, use amaranth.compat.genlib.fifo",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,193 +1,7 @@
|
|||
from collections import OrderedDict
|
||||
|
||||
from ..._utils import deprecated, _ignore_deprecated
|
||||
from ...hdl.xfrm import ValueTransformer, StatementTransformer
|
||||
from ...hdl.ast import *
|
||||
from ...hdl.ast import Signal as NativeSignal
|
||||
from ..fhdl.module import CompatModule, CompatFinalizeError
|
||||
from ..fhdl.structure import Signal, If, Case
|
||||
from amaranth.compat.genlib.fsm import *
|
||||
from amaranth.compat.genlib.fsm import __all__
|
||||
|
||||
|
||||
__all__ = ["AnonymousState", "NextState", "NextValue", "FSM"]
|
||||
|
||||
|
||||
class AnonymousState:
|
||||
pass
|
||||
|
||||
|
||||
class NextState(Statement):
|
||||
def __init__(self, state):
|
||||
super().__init__()
|
||||
self.state = state
|
||||
|
||||
|
||||
class NextValue(Statement):
|
||||
def __init__(self, target, value):
|
||||
super().__init__()
|
||||
self.target = target
|
||||
self.value = value
|
||||
|
||||
|
||||
def _target_eq(a, b):
|
||||
if type(a) != type(b):
|
||||
return False
|
||||
ty = type(a)
|
||||
if ty == Const:
|
||||
return a.value == b.value
|
||||
elif ty == NativeSignal or ty == Signal:
|
||||
return a is b
|
||||
elif ty == Cat:
|
||||
return all(_target_eq(x, y) for x, y in zip(a.l, b.l))
|
||||
elif ty == Slice:
|
||||
return (_target_eq(a.value, b.value)
|
||||
and a.start == b.start
|
||||
and a.stop == b.stop)
|
||||
elif ty == Part:
|
||||
return (_target_eq(a.value, b.value)
|
||||
and _target_eq(a.offset == b.offset)
|
||||
and a.width == b.width)
|
||||
elif ty == ArrayProxy:
|
||||
return (all(_target_eq(x, y) for x, y in zip(a.choices, b.choices))
|
||||
and _target_eq(a.key, b.key))
|
||||
else:
|
||||
raise ValueError("NextValue cannot be used with target type '{}'"
|
||||
.format(ty))
|
||||
|
||||
|
||||
class _LowerNext(ValueTransformer, StatementTransformer):
|
||||
def __init__(self, next_state_signal, encoding, aliases):
|
||||
self.next_state_signal = next_state_signal
|
||||
self.encoding = encoding
|
||||
self.aliases = aliases
|
||||
# (target, next_value_ce, next_value)
|
||||
self.registers = []
|
||||
|
||||
def _get_register_control(self, target):
|
||||
for x in self.registers:
|
||||
if _target_eq(target, x[0]):
|
||||
return x[1], x[2]
|
||||
raise KeyError
|
||||
|
||||
def on_unknown_statement(self, node):
|
||||
if isinstance(node, NextState):
|
||||
try:
|
||||
actual_state = self.aliases[node.state]
|
||||
except KeyError:
|
||||
actual_state = node.state
|
||||
return self.next_state_signal.eq(self.encoding[actual_state])
|
||||
elif isinstance(node, NextValue):
|
||||
try:
|
||||
next_value_ce, next_value = self._get_register_control(node.target)
|
||||
except KeyError:
|
||||
related = node.target if isinstance(node.target, Signal) else None
|
||||
next_value = Signal(node.target.shape(),
|
||||
name=None if related is None else "{}_fsm_next".format(related.name))
|
||||
next_value_ce = Signal(
|
||||
name=None if related is None else "{}_fsm_next_ce".format(related.name))
|
||||
self.registers.append((node.target, next_value_ce, next_value))
|
||||
return next_value.eq(node.value), next_value_ce.eq(1)
|
||||
else:
|
||||
return node
|
||||
|
||||
|
||||
@deprecated("instead of `migen.genlib.fsm.FSM()`, use `with m.FSM():`; note that there is no "
|
||||
"replacement for `{before,after}_{entering,leaving}` and `delayed_enter` methods")
|
||||
class FSM(CompatModule):
|
||||
def __init__(self, reset_state=None):
|
||||
self.actions = OrderedDict()
|
||||
self.state_aliases = dict()
|
||||
self.reset_state = reset_state
|
||||
|
||||
self.before_entering_signals = OrderedDict()
|
||||
self.before_leaving_signals = OrderedDict()
|
||||
self.after_entering_signals = OrderedDict()
|
||||
self.after_leaving_signals = OrderedDict()
|
||||
|
||||
def act(self, state, *statements):
|
||||
if self.finalized:
|
||||
raise CompatFinalizeError
|
||||
if self.reset_state is None:
|
||||
self.reset_state = state
|
||||
if state not in self.actions:
|
||||
self.actions[state] = []
|
||||
self.actions[state] += statements
|
||||
|
||||
def delayed_enter(self, name, target, delay):
|
||||
if self.finalized:
|
||||
raise CompatFinalizeError
|
||||
if delay > 0:
|
||||
state = name
|
||||
for i in range(delay):
|
||||
if i == delay - 1:
|
||||
next_state = target
|
||||
else:
|
||||
next_state = AnonymousState()
|
||||
self.act(state, NextState(next_state))
|
||||
state = next_state
|
||||
else:
|
||||
self.state_aliases[name] = target
|
||||
|
||||
def ongoing(self, state):
|
||||
is_ongoing = Signal()
|
||||
self.act(state, is_ongoing.eq(1))
|
||||
return is_ongoing
|
||||
|
||||
def _get_signal(self, d, state):
|
||||
if state not in self.actions:
|
||||
self.actions[state] = []
|
||||
try:
|
||||
return d[state]
|
||||
except KeyError:
|
||||
is_el = Signal()
|
||||
d[state] = is_el
|
||||
return is_el
|
||||
|
||||
def before_entering(self, state):
|
||||
return self._get_signal(self.before_entering_signals, state)
|
||||
|
||||
def before_leaving(self, state):
|
||||
return self._get_signal(self.before_leaving_signals, state)
|
||||
|
||||
def after_entering(self, state):
|
||||
signal = self._get_signal(self.after_entering_signals, state)
|
||||
self.sync += signal.eq(self.before_entering(state))
|
||||
return signal
|
||||
|
||||
def after_leaving(self, state):
|
||||
signal = self._get_signal(self.after_leaving_signals, state)
|
||||
self.sync += signal.eq(self.before_leaving(state))
|
||||
return signal
|
||||
|
||||
@_ignore_deprecated
|
||||
def do_finalize(self):
|
||||
nstates = len(self.actions)
|
||||
self.encoding = dict((s, n) for n, s in enumerate(self.actions.keys()))
|
||||
self.decoding = {n: s for s, n in self.encoding.items()}
|
||||
|
||||
decoder = lambda n: "{}/{}".format(self.decoding[n], n)
|
||||
self.state = Signal(range(nstates), reset=self.encoding[self.reset_state], decoder=decoder)
|
||||
self.next_state = Signal.like(self.state)
|
||||
|
||||
for state, signal in self.before_leaving_signals.items():
|
||||
encoded = self.encoding[state]
|
||||
self.comb += signal.eq((self.state == encoded) & ~(self.next_state == encoded))
|
||||
if self.reset_state in self.after_entering_signals:
|
||||
self.after_entering_signals[self.reset_state].reset = 1
|
||||
for state, signal in self.before_entering_signals.items():
|
||||
encoded = self.encoding[state]
|
||||
self.comb += signal.eq(~(self.state == encoded) & (self.next_state == encoded))
|
||||
|
||||
self._finalize_sync(self._lower_controls())
|
||||
|
||||
def _lower_controls(self):
|
||||
return _LowerNext(self.next_state, self.encoding, self.state_aliases)
|
||||
|
||||
def _finalize_sync(self, ls):
|
||||
cases = dict((self.encoding[k], ls.on_statement(v)) for k, v in self.actions.items() if v)
|
||||
self.comb += [
|
||||
self.next_state.eq(self.state),
|
||||
Case(self.state, cases).makedefault(self.encoding[self.reset_state])
|
||||
]
|
||||
self.sync += self.state.eq(self.next_state)
|
||||
for register, next_value_ce, next_value in ls.registers:
|
||||
self.sync += If(next_value_ce, register.eq(next_value))
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.compat.genlib.fsm, use amaranth.compat.genlib.fsm",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,195 +1,6 @@
|
|||
from ...tracer import *
|
||||
from ..fhdl.structure import *
|
||||
|
||||
from functools import reduce
|
||||
from operator import or_
|
||||
from amaranth.compat.genlib.record import *
|
||||
|
||||
|
||||
(DIR_NONE, DIR_S_TO_M, DIR_M_TO_S) = range(3)
|
||||
|
||||
# Possible layout elements:
|
||||
# 1. (name, size)
|
||||
# 2. (name, size, direction)
|
||||
# 3. (name, sublayout)
|
||||
# size can be an int, or a (int, bool) tuple for signed numbers
|
||||
# sublayout must be a list
|
||||
|
||||
|
||||
def set_layout_parameters(layout, **layout_dict):
|
||||
def resolve(p):
|
||||
if isinstance(p, str):
|
||||
try:
|
||||
return layout_dict[p]
|
||||
except KeyError:
|
||||
return p
|
||||
else:
|
||||
return p
|
||||
|
||||
r = []
|
||||
for f in layout:
|
||||
if isinstance(f[1], (int, tuple, str)): # cases 1/2
|
||||
if len(f) == 3:
|
||||
r.append((f[0], resolve(f[1]), f[2]))
|
||||
else:
|
||||
r.append((f[0], resolve(f[1])))
|
||||
elif isinstance(f[1], list): # case 3
|
||||
r.append((f[0], set_layout_parameters(f[1], **layout_dict)))
|
||||
else:
|
||||
raise TypeError
|
||||
return r
|
||||
|
||||
|
||||
def layout_len(layout):
|
||||
r = 0
|
||||
for f in layout:
|
||||
if isinstance(f[1], (int, tuple)): # cases 1/2
|
||||
if len(f) == 3:
|
||||
fname, fsize, fdirection = f
|
||||
else:
|
||||
fname, fsize = f
|
||||
elif isinstance(f[1], list): # case 3
|
||||
fname, fsublayout = f
|
||||
fsize = layout_len(fsublayout)
|
||||
else:
|
||||
raise TypeError
|
||||
if isinstance(fsize, tuple):
|
||||
r += fsize[0]
|
||||
else:
|
||||
r += fsize
|
||||
return r
|
||||
|
||||
|
||||
def layout_get(layout, name):
|
||||
for f in layout:
|
||||
if f[0] == name:
|
||||
return f
|
||||
raise KeyError(name)
|
||||
|
||||
|
||||
def layout_partial(layout, *elements):
|
||||
r = []
|
||||
for path in elements:
|
||||
path_s = path.split("/")
|
||||
last = path_s.pop()
|
||||
copy_ref = layout
|
||||
insert_ref = r
|
||||
for hop in path_s:
|
||||
name, copy_ref = layout_get(copy_ref, hop)
|
||||
try:
|
||||
name, insert_ref = layout_get(insert_ref, hop)
|
||||
except KeyError:
|
||||
new_insert_ref = []
|
||||
insert_ref.append((hop, new_insert_ref))
|
||||
insert_ref = new_insert_ref
|
||||
insert_ref.append(layout_get(copy_ref, last))
|
||||
return r
|
||||
|
||||
|
||||
class Record:
|
||||
def __init__(self, layout, name=None, **kwargs):
|
||||
try:
|
||||
self.name = get_var_name()
|
||||
except NameNotFound:
|
||||
self.name = ""
|
||||
self.layout = layout
|
||||
|
||||
if self.name:
|
||||
prefix = self.name + "_"
|
||||
else:
|
||||
prefix = ""
|
||||
for f in self.layout:
|
||||
if isinstance(f[1], (int, tuple)): # cases 1/2
|
||||
if(len(f) == 3):
|
||||
fname, fsize, fdirection = f
|
||||
else:
|
||||
fname, fsize = f
|
||||
finst = Signal(fsize, name=prefix + fname, **kwargs)
|
||||
elif isinstance(f[1], list): # case 3
|
||||
fname, fsublayout = f
|
||||
finst = Record(fsublayout, prefix + fname, **kwargs)
|
||||
else:
|
||||
raise TypeError
|
||||
setattr(self, fname, finst)
|
||||
|
||||
def eq(self, other):
|
||||
return [getattr(self, f[0]).eq(getattr(other, f[0]))
|
||||
for f in self.layout if hasattr(other, f[0])]
|
||||
|
||||
def iter_flat(self):
|
||||
for f in self.layout:
|
||||
e = getattr(self, f[0])
|
||||
if isinstance(e, Signal):
|
||||
if len(f) == 3:
|
||||
yield e, f[2]
|
||||
else:
|
||||
yield e, DIR_NONE
|
||||
elif isinstance(e, Record):
|
||||
yield from e.iter_flat()
|
||||
else:
|
||||
raise TypeError
|
||||
|
||||
def flatten(self):
|
||||
return [signal for signal, direction in self.iter_flat()]
|
||||
|
||||
def raw_bits(self):
|
||||
return Cat(*self.flatten())
|
||||
|
||||
def connect(self, *slaves, keep=None, omit=None):
|
||||
if keep is None:
|
||||
_keep = set([f[0] for f in self.layout])
|
||||
elif isinstance(keep, list):
|
||||
_keep = set(keep)
|
||||
else:
|
||||
_keep = keep
|
||||
if omit is None:
|
||||
_omit = set()
|
||||
elif isinstance(omit, list):
|
||||
_omit = set(omit)
|
||||
else:
|
||||
_omit = omit
|
||||
|
||||
_keep = _keep - _omit
|
||||
|
||||
r = []
|
||||
for f in self.layout:
|
||||
field = f[0]
|
||||
self_e = getattr(self, field)
|
||||
if isinstance(self_e, Signal):
|
||||
if field in _keep:
|
||||
direction = f[2]
|
||||
if direction == DIR_M_TO_S:
|
||||
r += [getattr(slave, field).eq(self_e) for slave in slaves]
|
||||
elif direction == DIR_S_TO_M:
|
||||
r.append(self_e.eq(reduce(or_, [getattr(slave, field) for slave in slaves])))
|
||||
else:
|
||||
raise TypeError
|
||||
else:
|
||||
for slave in slaves:
|
||||
r += self_e.connect(getattr(slave, field), keep=keep, omit=omit)
|
||||
return r
|
||||
|
||||
def connect_flat(self, *slaves):
|
||||
r = []
|
||||
iter_slaves = [slave.iter_flat() for slave in slaves]
|
||||
for m_signal, m_direction in self.iter_flat():
|
||||
if m_direction == DIR_M_TO_S:
|
||||
for iter_slave in iter_slaves:
|
||||
s_signal, s_direction = next(iter_slave)
|
||||
assert(s_direction == DIR_M_TO_S)
|
||||
r.append(s_signal.eq(m_signal))
|
||||
elif m_direction == DIR_S_TO_M:
|
||||
s_signals = []
|
||||
for iter_slave in iter_slaves:
|
||||
s_signal, s_direction = next(iter_slave)
|
||||
assert(s_direction == DIR_S_TO_M)
|
||||
s_signals.append(s_signal)
|
||||
r.append(m_signal.eq(reduce(or_, s_signals)))
|
||||
else:
|
||||
raise TypeError
|
||||
return r
|
||||
|
||||
def __len__(self):
|
||||
return layout_len(self.layout)
|
||||
|
||||
def __repr__(self):
|
||||
return "<Record " + ":".join(f[0] for f in self.layout) + " at " + hex(id(self)) + ">"
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.compat.genlib.record, use amaranth.compat.genlib.record",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,16 +1,7 @@
|
|||
from ..._utils import deprecated
|
||||
from ...lib.cdc import ResetSynchronizer as NativeResetSynchronizer
|
||||
from amaranth.compat.genlib.resetsync import *
|
||||
from amaranth.compat.genlib.resetsync import __all__
|
||||
|
||||
|
||||
__all__ = ["AsyncResetSynchronizer"]
|
||||
|
||||
|
||||
@deprecated("instead of `migen.genlib.resetsync.AsyncResetSynchronizer`, "
|
||||
"use `nmigen.lib.cdc.ResetSynchronizer`; note that ResetSynchronizer accepts "
|
||||
"a clock domain name as an argument, not a clock domain object")
|
||||
class CompatResetSynchronizer(NativeResetSynchronizer):
|
||||
def __init__(self, cd, async_reset):
|
||||
super().__init__(async_reset, domain=cd.name)
|
||||
|
||||
|
||||
AsyncResetSynchronizer = CompatResetSynchronizer
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.compat.genlib.resetsync, use amaranth.compat.genlib.resetsync",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,58 +1,7 @@
|
|||
from amaranth.compat.genlib.roundrobin import *
|
||||
from amaranth.compat.genlib.roundrobin import __all__
|
||||
|
||||
|
||||
import warnings
|
||||
|
||||
from ..fhdl.structure import Signal, If, Case
|
||||
from ..fhdl.module import CompatModule
|
||||
|
||||
|
||||
__all__ = ["RoundRobin", "SP_WITHDRAW", "SP_CE"]
|
||||
|
||||
(SP_WITHDRAW, SP_CE) = range(2)
|
||||
|
||||
class CompatRoundRobin(CompatModule):
|
||||
def __init__(self, n, switch_policy=SP_WITHDRAW):
|
||||
self.request = Signal(n)
|
||||
self.grant = Signal(max=max(2, n))
|
||||
self.switch_policy = switch_policy
|
||||
if self.switch_policy == SP_CE:
|
||||
warnings.warn("instead of `migen.genlib.roundrobin.RoundRobin`, "
|
||||
"use `nmigen.lib.scheduler.RoundRobin`; note that RoundRobin does not "
|
||||
"require a policy anymore but to get the same behavior as SP_CE you"
|
||||
"should use an EnableInserter",
|
||||
DeprecationWarning, stacklevel=1)
|
||||
self.ce = Signal()
|
||||
else:
|
||||
warnings.warn("instead of `migen.genlib.roundrobin.RoundRobin`, "
|
||||
"use `nmigen.lib.scheduler.RoundRobin`; note that RoundRobin does not "
|
||||
"require a policy anymore",
|
||||
DeprecationWarning, stacklevel=1)
|
||||
|
||||
###
|
||||
|
||||
if n > 1:
|
||||
cases = {}
|
||||
for i in range(n):
|
||||
switch = []
|
||||
for j in reversed(range(i+1, i+n)):
|
||||
t = j % n
|
||||
switch = [
|
||||
If(self.request[t],
|
||||
self.grant.eq(t)
|
||||
).Else(
|
||||
*switch
|
||||
)
|
||||
]
|
||||
if self.switch_policy == SP_WITHDRAW:
|
||||
case = [If(~self.request[i], *switch)]
|
||||
else:
|
||||
case = switch
|
||||
cases[i] = case
|
||||
statement = Case(self.grant, cases)
|
||||
if self.switch_policy == SP_CE:
|
||||
statement = If(self.ce, statement)
|
||||
self.sync += statement
|
||||
else:
|
||||
self.comb += self.grant.eq(0)
|
||||
|
||||
|
||||
|
||||
RoundRobin = CompatRoundRobin
|
||||
warnings.warn("instead of nmigen.compat.genlib.roundrobin, use amaranth.compat.genlib.roundrobin",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,54 +1,7 @@
|
|||
import functools
|
||||
import inspect
|
||||
from collections.abc import Iterable
|
||||
from ...hdl.cd import ClockDomain
|
||||
from ...hdl.ir import Fragment
|
||||
from ...sim import *
|
||||
from amaranth.compat.sim import *
|
||||
from amaranth.compat.sim import __all__
|
||||
|
||||
|
||||
__all__ = ["run_simulation", "passive"]
|
||||
|
||||
|
||||
def run_simulation(fragment_or_module, generators, clocks={"sync": 10}, vcd_name=None,
|
||||
special_overrides={}):
|
||||
assert not special_overrides
|
||||
|
||||
if hasattr(fragment_or_module, "get_fragment"):
|
||||
fragment = fragment_or_module.get_fragment()
|
||||
else:
|
||||
fragment = fragment_or_module
|
||||
|
||||
fragment = Fragment.get(fragment, platform=None)
|
||||
|
||||
if not isinstance(generators, dict):
|
||||
generators = {"sync": generators}
|
||||
if "sync" not in fragment.domains:
|
||||
fragment.add_domains(ClockDomain("sync"))
|
||||
|
||||
sim = Simulator(fragment)
|
||||
for domain, period in clocks.items():
|
||||
sim.add_clock(period / 1e9, domain=domain)
|
||||
for domain, processes in generators.items():
|
||||
def wrap(process):
|
||||
def wrapper():
|
||||
yield from process
|
||||
return wrapper
|
||||
if isinstance(processes, Iterable) and not inspect.isgenerator(processes):
|
||||
for process in processes:
|
||||
sim.add_sync_process(wrap(process), domain=domain)
|
||||
else:
|
||||
sim.add_sync_process(wrap(processes), domain=domain)
|
||||
|
||||
if vcd_name is not None:
|
||||
with sim.write_vcd(vcd_name):
|
||||
sim.run()
|
||||
else:
|
||||
sim.run()
|
||||
|
||||
|
||||
def passive(generator):
|
||||
@functools.wraps(generator)
|
||||
def wrapper(*args, **kwargs):
|
||||
yield Passive()
|
||||
yield from generator(*args, **kwargs)
|
||||
return wrapper
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.compat.sim, use amaranth.compat.sim",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,20 +1,7 @@
|
|||
from .ast import Shape, unsigned, signed
|
||||
from .ast import Value, Const, C, Mux, Cat, Repl, Array, Signal, ClockSignal, ResetSignal
|
||||
from .dsl import Module
|
||||
from .cd import ClockDomain
|
||||
from .ir import Elaboratable, Fragment, Instance
|
||||
from .mem import Memory
|
||||
from .rec import Record
|
||||
from .xfrm import DomainRenamer, ResetInserter, EnableInserter
|
||||
from amaranth.hdl import *
|
||||
from amaranth.hdl import __all__
|
||||
|
||||
|
||||
__all__ = [
|
||||
"Shape", "unsigned", "signed",
|
||||
"Value", "Const", "C", "Mux", "Cat", "Repl", "Array", "Signal", "ClockSignal", "ResetSignal",
|
||||
"Module",
|
||||
"ClockDomain",
|
||||
"Elaboratable", "Fragment", "Instance",
|
||||
"Memory",
|
||||
"Record",
|
||||
"DomainRenamer", "ResetInserter", "EnableInserter",
|
||||
]
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.hdl, use amaranth.hdl",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
1780
nmigen/hdl/ast.py
1780
nmigen/hdl/ast.py
File diff suppressed because it is too large
Load diff
|
|
@ -1,84 +1,7 @@
|
|||
from .. import tracer
|
||||
from .ast import Signal
|
||||
from amaranth.hdl.cd import *
|
||||
from amaranth.hdl.cd import __all__
|
||||
|
||||
|
||||
__all__ = ["ClockDomain", "DomainError"]
|
||||
|
||||
|
||||
class DomainError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ClockDomain:
|
||||
"""Synchronous domain.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
name : str or None
|
||||
Domain name. If ``None`` (the default) the name is inferred from the variable name this
|
||||
``ClockDomain`` is assigned to (stripping any `"cd_"` prefix).
|
||||
reset_less : bool
|
||||
If ``True``, the domain does not use a reset signal. Registers within this domain are
|
||||
still all initialized to their reset state once, e.g. through Verilog `"initial"`
|
||||
statements.
|
||||
clk_edge : str
|
||||
The edge of the clock signal on which signals are sampled. Must be one of "pos" or "neg".
|
||||
async_reset : bool
|
||||
If ``True``, the domain uses an asynchronous reset, and registers within this domain
|
||||
are initialized to their reset state when reset level changes. Otherwise, registers
|
||||
are initialized to reset state at the next clock cycle when reset is asserted.
|
||||
local : bool
|
||||
If ``True``, the domain will propagate only downwards in the design hierarchy. Otherwise,
|
||||
the domain will propagate everywhere.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
clk : Signal, inout
|
||||
The clock for this domain. Can be driven or used to drive other signals (preferably
|
||||
in combinatorial context).
|
||||
rst : Signal or None, inout
|
||||
Reset signal for this domain. Can be driven or used to drive.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def _name_for(domain_name, signal_name):
|
||||
if domain_name == "sync":
|
||||
return signal_name
|
||||
else:
|
||||
return "{}_{}".format(domain_name, signal_name)
|
||||
|
||||
def __init__(self, name=None, *, clk_edge="pos", reset_less=False, async_reset=False,
|
||||
local=False):
|
||||
if name is None:
|
||||
try:
|
||||
name = tracer.get_var_name()
|
||||
except tracer.NameNotFound:
|
||||
raise ValueError("Clock domain name must be specified explicitly")
|
||||
if name.startswith("cd_"):
|
||||
name = name[3:]
|
||||
if name == "comb":
|
||||
raise ValueError("Domain '{}' may not be clocked".format(name))
|
||||
|
||||
if clk_edge not in ("pos", "neg"):
|
||||
raise ValueError("Domain clock edge must be one of 'pos' or 'neg', not {!r}"
|
||||
.format(clk_edge))
|
||||
|
||||
self.name = name
|
||||
|
||||
self.clk = Signal(name=self._name_for(name, "clk"), src_loc_at=1)
|
||||
self.clk_edge = clk_edge
|
||||
|
||||
if reset_less:
|
||||
self.rst = None
|
||||
else:
|
||||
self.rst = Signal(name=self._name_for(name, "rst"), src_loc_at=1)
|
||||
|
||||
self.async_reset = async_reset
|
||||
|
||||
self.local = local
|
||||
|
||||
def rename(self, new_name):
|
||||
self.name = new_name
|
||||
self.clk.name = self._name_for(new_name, "clk")
|
||||
if self.rst is not None:
|
||||
self.rst.name = self._name_for(new_name, "rst")
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.hdl.cd, use amaranth.hdl.cd",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,546 +1,7 @@
|
|||
from collections import OrderedDict
|
||||
from contextlib import contextmanager, _GeneratorContextManager
|
||||
from functools import wraps
|
||||
from enum import Enum
|
||||
from amaranth.hdl.dsl import *
|
||||
from amaranth.hdl.dsl import __all__
|
||||
|
||||
|
||||
import warnings
|
||||
|
||||
from .._utils import flatten, bits_for
|
||||
from .. import tracer
|
||||
from .ast import *
|
||||
from .ir import *
|
||||
from .cd import *
|
||||
from .xfrm import *
|
||||
|
||||
|
||||
__all__ = ["SyntaxError", "SyntaxWarning", "Module"]
|
||||
|
||||
|
||||
class SyntaxError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class SyntaxWarning(Warning):
|
||||
pass
|
||||
|
||||
|
||||
class _ModuleBuilderProxy:
|
||||
def __init__(self, builder, depth):
|
||||
object.__setattr__(self, "_builder", builder)
|
||||
object.__setattr__(self, "_depth", depth)
|
||||
|
||||
|
||||
class _ModuleBuilderDomain(_ModuleBuilderProxy):
|
||||
def __init__(self, builder, depth, domain):
|
||||
super().__init__(builder, depth)
|
||||
self._domain = domain
|
||||
|
||||
def __iadd__(self, assigns):
|
||||
self._builder._add_statement(assigns, domain=self._domain, depth=self._depth)
|
||||
return self
|
||||
|
||||
|
||||
class _ModuleBuilderDomains(_ModuleBuilderProxy):
|
||||
def __getattr__(self, name):
|
||||
if name == "submodules":
|
||||
warnings.warn("Using '<module>.d.{}' would add statements to clock domain {!r}; "
|
||||
"did you mean <module>.{} instead?"
|
||||
.format(name, name, name),
|
||||
SyntaxWarning, stacklevel=2)
|
||||
if name == "comb":
|
||||
domain = None
|
||||
else:
|
||||
domain = name
|
||||
return _ModuleBuilderDomain(self._builder, self._depth, domain)
|
||||
|
||||
def __getitem__(self, name):
|
||||
return self.__getattr__(name)
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
if name == "_depth":
|
||||
object.__setattr__(self, name, value)
|
||||
elif not isinstance(value, _ModuleBuilderDomain):
|
||||
raise AttributeError("Cannot assign 'd.{}' attribute; did you mean 'd.{} +='?"
|
||||
.format(name, name))
|
||||
|
||||
def __setitem__(self, name, value):
|
||||
return self.__setattr__(name, value)
|
||||
|
||||
|
||||
class _ModuleBuilderRoot:
|
||||
def __init__(self, builder, depth):
|
||||
self._builder = builder
|
||||
self.domain = self.d = _ModuleBuilderDomains(builder, depth)
|
||||
|
||||
def __getattr__(self, name):
|
||||
if name in ("comb", "sync"):
|
||||
raise AttributeError("'{}' object has no attribute '{}'; did you mean 'd.{}'?"
|
||||
.format(type(self).__name__, name, name))
|
||||
raise AttributeError("'{}' object has no attribute '{}'"
|
||||
.format(type(self).__name__, name))
|
||||
|
||||
|
||||
class _ModuleBuilderSubmodules:
|
||||
def __init__(self, builder):
|
||||
object.__setattr__(self, "_builder", builder)
|
||||
|
||||
def __iadd__(self, modules):
|
||||
for module in flatten([modules]):
|
||||
self._builder._add_submodule(module)
|
||||
return self
|
||||
|
||||
def __setattr__(self, name, submodule):
|
||||
self._builder._add_submodule(submodule, name)
|
||||
|
||||
def __setitem__(self, name, value):
|
||||
return self.__setattr__(name, value)
|
||||
|
||||
def __getattr__(self, name):
|
||||
return self._builder._get_submodule(name)
|
||||
|
||||
def __getitem__(self, name):
|
||||
return self.__getattr__(name)
|
||||
|
||||
|
||||
class _ModuleBuilderDomainSet:
|
||||
def __init__(self, builder):
|
||||
object.__setattr__(self, "_builder", builder)
|
||||
|
||||
def __iadd__(self, domains):
|
||||
for domain in flatten([domains]):
|
||||
if not isinstance(domain, ClockDomain):
|
||||
raise TypeError("Only clock domains may be added to `m.domains`, not {!r}"
|
||||
.format(domain))
|
||||
self._builder._add_domain(domain)
|
||||
return self
|
||||
|
||||
def __setattr__(self, name, domain):
|
||||
if not isinstance(domain, ClockDomain):
|
||||
raise TypeError("Only clock domains may be added to `m.domains`, not {!r}"
|
||||
.format(domain))
|
||||
if domain.name != name:
|
||||
raise NameError("Clock domain name {!r} must match name in `m.domains.{} += ...` "
|
||||
"syntax"
|
||||
.format(domain.name, name))
|
||||
self._builder._add_domain(domain)
|
||||
|
||||
|
||||
# It's not particularly clean to depend on an internal interface, but, unfortunately, __bool__
|
||||
# must be defined on a class to be called during implicit conversion.
|
||||
class _GuardedContextManager(_GeneratorContextManager):
|
||||
def __init__(self, keyword, func, args, kwds):
|
||||
self.keyword = keyword
|
||||
return super().__init__(func, args, kwds)
|
||||
|
||||
def __bool__(self):
|
||||
raise SyntaxError("`if m.{kw}(...):` does not work; use `with m.{kw}(...)`"
|
||||
.format(kw=self.keyword))
|
||||
|
||||
|
||||
def _guardedcontextmanager(keyword):
|
||||
def decorator(func):
|
||||
@wraps(func)
|
||||
def helper(*args, **kwds):
|
||||
return _GuardedContextManager(keyword, func, args, kwds)
|
||||
return helper
|
||||
return decorator
|
||||
|
||||
|
||||
class FSM:
|
||||
def __init__(self, state, encoding, decoding):
|
||||
self.state = state
|
||||
self.encoding = encoding
|
||||
self.decoding = decoding
|
||||
|
||||
def ongoing(self, name):
|
||||
if name not in self.encoding:
|
||||
self.encoding[name] = len(self.encoding)
|
||||
return Operator("==", [self.state, self.encoding[name]], src_loc_at=0)
|
||||
|
||||
|
||||
class Module(_ModuleBuilderRoot, Elaboratable):
|
||||
@classmethod
|
||||
def __init_subclass__(cls):
|
||||
raise SyntaxError("Instead of inheriting from `Module`, inherit from `Elaboratable` "
|
||||
"and return a `Module` from the `elaborate(self, platform)` method")
|
||||
|
||||
def __init__(self):
|
||||
_ModuleBuilderRoot.__init__(self, self, depth=0)
|
||||
self.submodules = _ModuleBuilderSubmodules(self)
|
||||
self.domains = _ModuleBuilderDomainSet(self)
|
||||
|
||||
self._statements = Statement.cast([])
|
||||
self._ctrl_context = None
|
||||
self._ctrl_stack = []
|
||||
|
||||
self._driving = SignalDict()
|
||||
self._named_submodules = {}
|
||||
self._anon_submodules = []
|
||||
self._domains = {}
|
||||
self._generated = {}
|
||||
|
||||
def _check_context(self, construct, context):
|
||||
if self._ctrl_context != context:
|
||||
if self._ctrl_context is None:
|
||||
raise SyntaxError("{} is not permitted outside of {}"
|
||||
.format(construct, context))
|
||||
else:
|
||||
if self._ctrl_context == "Switch":
|
||||
secondary_context = "Case"
|
||||
if self._ctrl_context == "FSM":
|
||||
secondary_context = "State"
|
||||
raise SyntaxError("{} is not permitted directly inside of {}; it is permitted "
|
||||
"inside of {} {}"
|
||||
.format(construct, self._ctrl_context,
|
||||
self._ctrl_context, secondary_context))
|
||||
|
||||
def _get_ctrl(self, name):
|
||||
if self._ctrl_stack:
|
||||
top_name, top_data = self._ctrl_stack[-1]
|
||||
if top_name == name:
|
||||
return top_data
|
||||
|
||||
def _flush_ctrl(self):
|
||||
while len(self._ctrl_stack) > self.domain._depth:
|
||||
self._pop_ctrl()
|
||||
|
||||
def _set_ctrl(self, name, data):
|
||||
self._flush_ctrl()
|
||||
self._ctrl_stack.append((name, data))
|
||||
return data
|
||||
|
||||
def _check_signed_cond(self, cond):
|
||||
cond = Value.cast(cond)
|
||||
width, signed = cond.shape()
|
||||
if signed:
|
||||
warnings.warn("Signed values in If/Elif conditions usually result from inverting "
|
||||
"Python booleans with ~, which leads to unexpected results. "
|
||||
"Replace `~flag` with `not flag`. (If this is a false positive, "
|
||||
"silence this warning with `m.If(x)` → `m.If(x.bool())`.)",
|
||||
SyntaxWarning, stacklevel=4)
|
||||
return cond
|
||||
|
||||
@_guardedcontextmanager("If")
|
||||
def If(self, cond):
|
||||
self._check_context("If", context=None)
|
||||
cond = self._check_signed_cond(cond)
|
||||
src_loc = tracer.get_src_loc(src_loc_at=1)
|
||||
if_data = self._set_ctrl("If", {
|
||||
"depth": self.domain._depth,
|
||||
"tests": [],
|
||||
"bodies": [],
|
||||
"src_loc": src_loc,
|
||||
"src_locs": [],
|
||||
})
|
||||
try:
|
||||
_outer_case, self._statements = self._statements, []
|
||||
self.domain._depth += 1
|
||||
yield
|
||||
self._flush_ctrl()
|
||||
if_data["tests"].append(cond)
|
||||
if_data["bodies"].append(self._statements)
|
||||
if_data["src_locs"].append(src_loc)
|
||||
finally:
|
||||
self.domain._depth -= 1
|
||||
self._statements = _outer_case
|
||||
|
||||
@_guardedcontextmanager("Elif")
|
||||
def Elif(self, cond):
|
||||
self._check_context("Elif", context=None)
|
||||
cond = self._check_signed_cond(cond)
|
||||
src_loc = tracer.get_src_loc(src_loc_at=1)
|
||||
if_data = self._get_ctrl("If")
|
||||
if if_data is None or if_data["depth"] != self.domain._depth:
|
||||
raise SyntaxError("Elif without preceding If")
|
||||
try:
|
||||
_outer_case, self._statements = self._statements, []
|
||||
self.domain._depth += 1
|
||||
yield
|
||||
self._flush_ctrl()
|
||||
if_data["tests"].append(cond)
|
||||
if_data["bodies"].append(self._statements)
|
||||
if_data["src_locs"].append(src_loc)
|
||||
finally:
|
||||
self.domain._depth -= 1
|
||||
self._statements = _outer_case
|
||||
|
||||
@_guardedcontextmanager("Else")
|
||||
def Else(self):
|
||||
self._check_context("Else", context=None)
|
||||
src_loc = tracer.get_src_loc(src_loc_at=1)
|
||||
if_data = self._get_ctrl("If")
|
||||
if if_data is None or if_data["depth"] != self.domain._depth:
|
||||
raise SyntaxError("Else without preceding If/Elif")
|
||||
try:
|
||||
_outer_case, self._statements = self._statements, []
|
||||
self.domain._depth += 1
|
||||
yield
|
||||
self._flush_ctrl()
|
||||
if_data["bodies"].append(self._statements)
|
||||
if_data["src_locs"].append(src_loc)
|
||||
finally:
|
||||
self.domain._depth -= 1
|
||||
self._statements = _outer_case
|
||||
self._pop_ctrl()
|
||||
|
||||
@contextmanager
|
||||
def Switch(self, test):
|
||||
self._check_context("Switch", context=None)
|
||||
switch_data = self._set_ctrl("Switch", {
|
||||
"test": Value.cast(test),
|
||||
"cases": OrderedDict(),
|
||||
"src_loc": tracer.get_src_loc(src_loc_at=1),
|
||||
"case_src_locs": {},
|
||||
})
|
||||
try:
|
||||
self._ctrl_context = "Switch"
|
||||
self.domain._depth += 1
|
||||
yield
|
||||
finally:
|
||||
self.domain._depth -= 1
|
||||
self._ctrl_context = None
|
||||
self._pop_ctrl()
|
||||
|
||||
@contextmanager
|
||||
def Case(self, *patterns):
|
||||
self._check_context("Case", context="Switch")
|
||||
src_loc = tracer.get_src_loc(src_loc_at=1)
|
||||
switch_data = self._get_ctrl("Switch")
|
||||
new_patterns = ()
|
||||
for pattern in patterns:
|
||||
if not isinstance(pattern, (int, str, Enum)):
|
||||
raise SyntaxError("Case pattern must be an integer, a string, or an enumeration, "
|
||||
"not {!r}"
|
||||
.format(pattern))
|
||||
if isinstance(pattern, str) and any(bit not in "01- \t" for bit in pattern):
|
||||
raise SyntaxError("Case pattern '{}' must consist of 0, 1, and - (don't care) "
|
||||
"bits, and may include whitespace"
|
||||
.format(pattern))
|
||||
if (isinstance(pattern, str) and
|
||||
len("".join(pattern.split())) != len(switch_data["test"])):
|
||||
raise SyntaxError("Case pattern '{}' must have the same width as switch value "
|
||||
"(which is {})"
|
||||
.format(pattern, len(switch_data["test"])))
|
||||
if isinstance(pattern, int) and bits_for(pattern) > len(switch_data["test"]):
|
||||
warnings.warn("Case pattern '{:b}' is wider than switch value "
|
||||
"(which has width {}); comparison will never be true"
|
||||
.format(pattern, len(switch_data["test"])),
|
||||
SyntaxWarning, stacklevel=3)
|
||||
continue
|
||||
if isinstance(pattern, Enum) and bits_for(pattern.value) > len(switch_data["test"]):
|
||||
warnings.warn("Case pattern '{:b}' ({}.{}) is wider than switch value "
|
||||
"(which has width {}); comparison will never be true"
|
||||
.format(pattern.value, pattern.__class__.__name__, pattern.name,
|
||||
len(switch_data["test"])),
|
||||
SyntaxWarning, stacklevel=3)
|
||||
continue
|
||||
new_patterns = (*new_patterns, pattern)
|
||||
try:
|
||||
_outer_case, self._statements = self._statements, []
|
||||
self._ctrl_context = None
|
||||
yield
|
||||
self._flush_ctrl()
|
||||
# If none of the provided cases can possibly be true, omit this branch completely.
|
||||
# This needs to be differentiated from no cases being provided in the first place,
|
||||
# which means the branch will always match.
|
||||
if not (patterns and not new_patterns):
|
||||
switch_data["cases"][new_patterns] = self._statements
|
||||
switch_data["case_src_locs"][new_patterns] = src_loc
|
||||
finally:
|
||||
self._ctrl_context = "Switch"
|
||||
self._statements = _outer_case
|
||||
|
||||
def Default(self):
|
||||
return self.Case()
|
||||
|
||||
@contextmanager
|
||||
def FSM(self, reset=None, domain="sync", name="fsm"):
|
||||
self._check_context("FSM", context=None)
|
||||
if domain == "comb":
|
||||
raise ValueError("FSM may not be driven by the '{}' domain".format(domain))
|
||||
fsm_data = self._set_ctrl("FSM", {
|
||||
"name": name,
|
||||
"signal": Signal(name="{}_state".format(name), src_loc_at=2),
|
||||
"reset": reset,
|
||||
"domain": domain,
|
||||
"encoding": OrderedDict(),
|
||||
"decoding": OrderedDict(),
|
||||
"states": OrderedDict(),
|
||||
"src_loc": tracer.get_src_loc(src_loc_at=1),
|
||||
"state_src_locs": {},
|
||||
})
|
||||
self._generated[name] = fsm = \
|
||||
FSM(fsm_data["signal"], fsm_data["encoding"], fsm_data["decoding"])
|
||||
try:
|
||||
self._ctrl_context = "FSM"
|
||||
self.domain._depth += 1
|
||||
yield fsm
|
||||
for state_name in fsm_data["encoding"]:
|
||||
if state_name not in fsm_data["states"]:
|
||||
raise NameError("FSM state '{}' is referenced but not defined"
|
||||
.format(state_name))
|
||||
finally:
|
||||
self.domain._depth -= 1
|
||||
self._ctrl_context = None
|
||||
self._pop_ctrl()
|
||||
|
||||
@contextmanager
|
||||
def State(self, name):
|
||||
self._check_context("FSM State", context="FSM")
|
||||
src_loc = tracer.get_src_loc(src_loc_at=1)
|
||||
fsm_data = self._get_ctrl("FSM")
|
||||
if name in fsm_data["states"]:
|
||||
raise NameError("FSM state '{}' is already defined".format(name))
|
||||
if name not in fsm_data["encoding"]:
|
||||
fsm_data["encoding"][name] = len(fsm_data["encoding"])
|
||||
try:
|
||||
_outer_case, self._statements = self._statements, []
|
||||
self._ctrl_context = None
|
||||
yield
|
||||
self._flush_ctrl()
|
||||
fsm_data["states"][name] = self._statements
|
||||
fsm_data["state_src_locs"][name] = src_loc
|
||||
finally:
|
||||
self._ctrl_context = "FSM"
|
||||
self._statements = _outer_case
|
||||
|
||||
@property
|
||||
def next(self):
|
||||
raise SyntaxError("Only assignment to `m.next` is permitted")
|
||||
|
||||
@next.setter
|
||||
def next(self, name):
|
||||
if self._ctrl_context != "FSM":
|
||||
for level, (ctrl_name, ctrl_data) in enumerate(reversed(self._ctrl_stack)):
|
||||
if ctrl_name == "FSM":
|
||||
if name not in ctrl_data["encoding"]:
|
||||
ctrl_data["encoding"][name] = len(ctrl_data["encoding"])
|
||||
self._add_statement(
|
||||
assigns=[ctrl_data["signal"].eq(ctrl_data["encoding"][name])],
|
||||
domain=ctrl_data["domain"],
|
||||
depth=len(self._ctrl_stack))
|
||||
return
|
||||
|
||||
raise SyntaxError("`m.next = <...>` is only permitted inside an FSM state")
|
||||
|
||||
def _pop_ctrl(self):
|
||||
name, data = self._ctrl_stack.pop()
|
||||
src_loc = data["src_loc"]
|
||||
|
||||
if name == "If":
|
||||
if_tests, if_bodies = data["tests"], data["bodies"]
|
||||
if_src_locs = data["src_locs"]
|
||||
|
||||
tests, cases = [], OrderedDict()
|
||||
for if_test, if_case in zip(if_tests + [None], if_bodies):
|
||||
if if_test is not None:
|
||||
if len(if_test) != 1:
|
||||
if_test = if_test.bool()
|
||||
tests.append(if_test)
|
||||
|
||||
if if_test is not None:
|
||||
match = ("1" + "-" * (len(tests) - 1)).rjust(len(if_tests), "-")
|
||||
else:
|
||||
match = None
|
||||
cases[match] = if_case
|
||||
|
||||
self._statements.append(Switch(Cat(tests), cases,
|
||||
src_loc=src_loc, case_src_locs=dict(zip(cases, if_src_locs))))
|
||||
|
||||
if name == "Switch":
|
||||
switch_test, switch_cases = data["test"], data["cases"]
|
||||
switch_case_src_locs = data["case_src_locs"]
|
||||
|
||||
self._statements.append(Switch(switch_test, switch_cases,
|
||||
src_loc=src_loc, case_src_locs=switch_case_src_locs))
|
||||
|
||||
if name == "FSM":
|
||||
fsm_signal, fsm_reset, fsm_encoding, fsm_decoding, fsm_states = \
|
||||
data["signal"], data["reset"], data["encoding"], data["decoding"], data["states"]
|
||||
fsm_state_src_locs = data["state_src_locs"]
|
||||
if not fsm_states:
|
||||
return
|
||||
fsm_signal.width = bits_for(len(fsm_encoding) - 1)
|
||||
if fsm_reset is None:
|
||||
fsm_signal.reset = fsm_encoding[next(iter(fsm_states))]
|
||||
else:
|
||||
fsm_signal.reset = fsm_encoding[fsm_reset]
|
||||
# The FSM is encoded such that the state with encoding 0 is always the reset state.
|
||||
fsm_decoding.update((n, s) for s, n in fsm_encoding.items())
|
||||
fsm_signal.decoder = lambda n: "{}/{}".format(fsm_decoding[n], n)
|
||||
self._statements.append(Switch(fsm_signal,
|
||||
OrderedDict((fsm_encoding[name], stmts) for name, stmts in fsm_states.items()),
|
||||
src_loc=src_loc, case_src_locs={fsm_encoding[name]: fsm_state_src_locs[name]
|
||||
for name in fsm_states}))
|
||||
|
||||
def _add_statement(self, assigns, domain, depth, compat_mode=False):
|
||||
def domain_name(domain):
|
||||
if domain is None:
|
||||
return "comb"
|
||||
else:
|
||||
return domain
|
||||
|
||||
while len(self._ctrl_stack) > self.domain._depth:
|
||||
self._pop_ctrl()
|
||||
|
||||
for stmt in Statement.cast(assigns):
|
||||
if not compat_mode and not isinstance(stmt, (Assign, Assert, Assume, Cover)):
|
||||
raise SyntaxError(
|
||||
"Only assignments and property checks may be appended to d.{}"
|
||||
.format(domain_name(domain)))
|
||||
|
||||
stmt._MustUse__used = True
|
||||
stmt = SampleDomainInjector(domain)(stmt)
|
||||
|
||||
for signal in stmt._lhs_signals():
|
||||
if signal not in self._driving:
|
||||
self._driving[signal] = domain
|
||||
elif self._driving[signal] != domain:
|
||||
cd_curr = self._driving[signal]
|
||||
raise SyntaxError(
|
||||
"Driver-driver conflict: trying to drive {!r} from d.{}, but it is "
|
||||
"already driven from d.{}"
|
||||
.format(signal, domain_name(domain), domain_name(cd_curr)))
|
||||
|
||||
self._statements.append(stmt)
|
||||
|
||||
def _add_submodule(self, submodule, name=None):
|
||||
if not hasattr(submodule, "elaborate"):
|
||||
raise TypeError("Trying to add {!r}, which does not implement .elaborate(), as "
|
||||
"a submodule".format(submodule))
|
||||
if name == None:
|
||||
self._anon_submodules.append(submodule)
|
||||
else:
|
||||
if name in self._named_submodules:
|
||||
raise NameError("Submodule named '{}' already exists".format(name))
|
||||
self._named_submodules[name] = submodule
|
||||
|
||||
def _get_submodule(self, name):
|
||||
if name in self._named_submodules:
|
||||
return self._named_submodules[name]
|
||||
else:
|
||||
raise AttributeError("No submodule named '{}' exists".format(name))
|
||||
|
||||
def _add_domain(self, cd):
|
||||
if cd.name in self._domains:
|
||||
raise NameError("Clock domain named '{}' already exists".format(cd.name))
|
||||
self._domains[cd.name] = cd
|
||||
|
||||
def _flush(self):
|
||||
while self._ctrl_stack:
|
||||
self._pop_ctrl()
|
||||
|
||||
def elaborate(self, platform):
|
||||
self._flush()
|
||||
|
||||
fragment = Fragment()
|
||||
for name in self._named_submodules:
|
||||
fragment.add_subfragment(Fragment.get(self._named_submodules[name], platform), name)
|
||||
for submodule in self._anon_submodules:
|
||||
fragment.add_subfragment(Fragment.get(submodule, platform), None)
|
||||
statements = SampleDomainInjector("sync")(self._statements)
|
||||
fragment.add_statements(statements)
|
||||
for signal, domain in self._driving.items():
|
||||
fragment.add_driver(signal, domain)
|
||||
fragment.add_domains(self._domains.values())
|
||||
fragment.generated.update(self._generated)
|
||||
return fragment
|
||||
warnings.warn("instead of nmigen.hdl.dsl, use amaranth.hdl.dsl",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
597
nmigen/hdl/ir.py
597
nmigen/hdl/ir.py
|
|
@ -1,592 +1,7 @@
|
|||
from abc import ABCMeta
|
||||
from collections import defaultdict, OrderedDict
|
||||
from functools import reduce
|
||||
from amaranth.hdl.ir import *
|
||||
from amaranth.hdl.ir import __all__
|
||||
|
||||
|
||||
import warnings
|
||||
|
||||
from .._utils import *
|
||||
from .._unused import *
|
||||
from .ast import *
|
||||
from .cd import *
|
||||
|
||||
|
||||
__all__ = ["UnusedElaboratable", "Elaboratable", "DriverConflict", "Fragment", "Instance"]
|
||||
|
||||
|
||||
class UnusedElaboratable(UnusedMustUse):
|
||||
pass
|
||||
|
||||
|
||||
class Elaboratable(MustUse, metaclass=ABCMeta):
|
||||
_MustUse__warning = UnusedElaboratable
|
||||
|
||||
|
||||
class DriverConflict(UserWarning):
|
||||
pass
|
||||
|
||||
|
||||
class Fragment:
|
||||
@staticmethod
|
||||
def get(obj, platform):
|
||||
code = None
|
||||
while True:
|
||||
if isinstance(obj, Fragment):
|
||||
return obj
|
||||
elif isinstance(obj, Elaboratable):
|
||||
code = obj.elaborate.__code__
|
||||
obj._MustUse__used = True
|
||||
obj = obj.elaborate(platform)
|
||||
elif hasattr(obj, "elaborate"):
|
||||
warnings.warn(
|
||||
message="Class {!r} is an elaboratable that does not explicitly inherit from "
|
||||
"Elaboratable; doing so would improve diagnostics"
|
||||
.format(type(obj)),
|
||||
category=RuntimeWarning,
|
||||
stacklevel=2)
|
||||
code = obj.elaborate.__code__
|
||||
obj = obj.elaborate(platform)
|
||||
else:
|
||||
raise AttributeError("Object {!r} cannot be elaborated".format(obj))
|
||||
if obj is None and code is not None:
|
||||
warnings.warn_explicit(
|
||||
message=".elaborate() returned None; missing return statement?",
|
||||
category=UserWarning,
|
||||
filename=code.co_filename,
|
||||
lineno=code.co_firstlineno)
|
||||
|
||||
def __init__(self):
|
||||
self.ports = SignalDict()
|
||||
self.drivers = OrderedDict()
|
||||
self.statements = []
|
||||
self.domains = OrderedDict()
|
||||
self.subfragments = []
|
||||
self.attrs = OrderedDict()
|
||||
self.generated = OrderedDict()
|
||||
self.flatten = False
|
||||
|
||||
def add_ports(self, *ports, dir):
|
||||
assert dir in ("i", "o", "io")
|
||||
for port in flatten(ports):
|
||||
self.ports[port] = dir
|
||||
|
||||
def iter_ports(self, dir=None):
|
||||
if dir is None:
|
||||
yield from self.ports
|
||||
else:
|
||||
for port, port_dir in self.ports.items():
|
||||
if port_dir == dir:
|
||||
yield port
|
||||
|
||||
def add_driver(self, signal, domain=None):
|
||||
if domain not in self.drivers:
|
||||
self.drivers[domain] = SignalSet()
|
||||
self.drivers[domain].add(signal)
|
||||
|
||||
def iter_drivers(self):
|
||||
for domain, signals in self.drivers.items():
|
||||
for signal in signals:
|
||||
yield domain, signal
|
||||
|
||||
def iter_comb(self):
|
||||
if None in self.drivers:
|
||||
yield from self.drivers[None]
|
||||
|
||||
def iter_sync(self):
|
||||
for domain, signals in self.drivers.items():
|
||||
if domain is None:
|
||||
continue
|
||||
for signal in signals:
|
||||
yield domain, signal
|
||||
|
||||
def iter_signals(self):
|
||||
signals = SignalSet()
|
||||
signals |= self.ports.keys()
|
||||
for domain, domain_signals in self.drivers.items():
|
||||
if domain is not None:
|
||||
cd = self.domains[domain]
|
||||
signals.add(cd.clk)
|
||||
if cd.rst is not None:
|
||||
signals.add(cd.rst)
|
||||
signals |= domain_signals
|
||||
return signals
|
||||
|
||||
def add_domains(self, *domains):
|
||||
for domain in flatten(domains):
|
||||
assert isinstance(domain, ClockDomain)
|
||||
assert domain.name not in self.domains
|
||||
self.domains[domain.name] = domain
|
||||
|
||||
def iter_domains(self):
|
||||
yield from self.domains
|
||||
|
||||
def add_statements(self, *stmts):
|
||||
for stmt in Statement.cast(stmts):
|
||||
stmt._MustUse__used = True
|
||||
self.statements.append(stmt)
|
||||
|
||||
def add_subfragment(self, subfragment, name=None):
|
||||
assert isinstance(subfragment, Fragment)
|
||||
self.subfragments.append((subfragment, name))
|
||||
|
||||
def find_subfragment(self, name_or_index):
|
||||
if isinstance(name_or_index, int):
|
||||
if name_or_index < len(self.subfragments):
|
||||
subfragment, name = self.subfragments[name_or_index]
|
||||
return subfragment
|
||||
raise NameError("No subfragment at index #{}".format(name_or_index))
|
||||
else:
|
||||
for subfragment, name in self.subfragments:
|
||||
if name == name_or_index:
|
||||
return subfragment
|
||||
raise NameError("No subfragment with name '{}'".format(name_or_index))
|
||||
|
||||
def find_generated(self, *path):
|
||||
if len(path) > 1:
|
||||
path_component, *path = path
|
||||
return self.find_subfragment(path_component).find_generated(*path)
|
||||
else:
|
||||
item, = path
|
||||
return self.generated[item]
|
||||
|
||||
def elaborate(self, platform):
|
||||
return self
|
||||
|
||||
def _merge_subfragment(self, subfragment):
|
||||
# Merge subfragment's everything except clock domains into this fragment.
|
||||
# Flattening is done after clock domain propagation, so we can assume the domains
|
||||
# are already the same in every involved fragment in the first place.
|
||||
self.ports.update(subfragment.ports)
|
||||
for domain, signal in subfragment.iter_drivers():
|
||||
self.add_driver(signal, domain)
|
||||
self.statements += subfragment.statements
|
||||
self.subfragments += subfragment.subfragments
|
||||
|
||||
# Remove the merged subfragment.
|
||||
found = False
|
||||
for i, (check_subfrag, check_name) in enumerate(self.subfragments): # :nobr:
|
||||
if subfragment == check_subfrag:
|
||||
del self.subfragments[i]
|
||||
found = True
|
||||
break
|
||||
assert found
|
||||
|
||||
def _resolve_hierarchy_conflicts(self, hierarchy=("top",), mode="warn"):
|
||||
assert mode in ("silent", "warn", "error")
|
||||
|
||||
driver_subfrags = SignalDict()
|
||||
memory_subfrags = OrderedDict()
|
||||
def add_subfrag(registry, entity, entry):
|
||||
# Because of missing domain insertion, at the point when this code runs, we have
|
||||
# a mixture of bound and unbound {Clock,Reset}Signals. Map the bound ones to
|
||||
# the actual signals (because the signal itself can be driven as well); but leave
|
||||
# the unbound ones as it is, because there's no concrete signal for it yet anyway.
|
||||
if isinstance(entity, ClockSignal) and entity.domain in self.domains:
|
||||
entity = self.domains[entity.domain].clk
|
||||
elif isinstance(entity, ResetSignal) and entity.domain in self.domains:
|
||||
entity = self.domains[entity.domain].rst
|
||||
|
||||
if entity not in registry:
|
||||
registry[entity] = set()
|
||||
registry[entity].add(entry)
|
||||
|
||||
# For each signal driven by this fragment and/or its subfragments, determine which
|
||||
# subfragments also drive it.
|
||||
for domain, signal in self.iter_drivers():
|
||||
add_subfrag(driver_subfrags, signal, (None, hierarchy))
|
||||
|
||||
flatten_subfrags = set()
|
||||
for i, (subfrag, name) in enumerate(self.subfragments):
|
||||
if name is None:
|
||||
name = "<unnamed #{}>".format(i)
|
||||
subfrag_hierarchy = hierarchy + (name,)
|
||||
|
||||
if subfrag.flatten:
|
||||
# Always flatten subfragments that explicitly request it.
|
||||
flatten_subfrags.add((subfrag, subfrag_hierarchy))
|
||||
|
||||
if isinstance(subfrag, Instance):
|
||||
# For memories (which are subfragments, but semantically a part of superfragment),
|
||||
# record that this fragment is driving it.
|
||||
if subfrag.type in ("$memrd", "$memwr"):
|
||||
memory = subfrag.parameters["MEMID"]
|
||||
add_subfrag(memory_subfrags, memory, (None, hierarchy))
|
||||
|
||||
# Never flatten instances.
|
||||
continue
|
||||
|
||||
# First, recurse into subfragments and let them detect driver conflicts as well.
|
||||
subfrag_drivers, subfrag_memories = \
|
||||
subfrag._resolve_hierarchy_conflicts(subfrag_hierarchy, mode)
|
||||
|
||||
# Second, classify subfragments by signals they drive and memories they use.
|
||||
for signal in subfrag_drivers:
|
||||
add_subfrag(driver_subfrags, signal, (subfrag, subfrag_hierarchy))
|
||||
for memory in subfrag_memories:
|
||||
add_subfrag(memory_subfrags, memory, (subfrag, subfrag_hierarchy))
|
||||
|
||||
# Find out the set of subfragments that needs to be flattened into this fragment
|
||||
# to resolve driver-driver conflicts.
|
||||
def flatten_subfrags_if_needed(subfrags):
|
||||
if len(subfrags) == 1:
|
||||
return []
|
||||
flatten_subfrags.update((f, h) for f, h in subfrags if f is not None)
|
||||
return list(sorted(".".join(h) for f, h in subfrags))
|
||||
|
||||
for signal, subfrags in driver_subfrags.items():
|
||||
subfrag_names = flatten_subfrags_if_needed(subfrags)
|
||||
if not subfrag_names:
|
||||
continue
|
||||
|
||||
# While we're at it, show a message.
|
||||
message = ("Signal '{}' is driven from multiple fragments: {}"
|
||||
.format(signal, ", ".join(subfrag_names)))
|
||||
if mode == "error":
|
||||
raise DriverConflict(message)
|
||||
elif mode == "warn":
|
||||
message += "; hierarchy will be flattened"
|
||||
warnings.warn_explicit(message, DriverConflict, *signal.src_loc)
|
||||
|
||||
for memory, subfrags in memory_subfrags.items():
|
||||
subfrag_names = flatten_subfrags_if_needed(subfrags)
|
||||
if not subfrag_names:
|
||||
continue
|
||||
|
||||
# While we're at it, show a message.
|
||||
message = ("Memory '{}' is accessed from multiple fragments: {}"
|
||||
.format(memory.name, ", ".join(subfrag_names)))
|
||||
if mode == "error":
|
||||
raise DriverConflict(message)
|
||||
elif mode == "warn":
|
||||
message += "; hierarchy will be flattened"
|
||||
warnings.warn_explicit(message, DriverConflict, *memory.src_loc)
|
||||
|
||||
# Flatten hierarchy.
|
||||
for subfrag, subfrag_hierarchy in sorted(flatten_subfrags, key=lambda x: x[1]):
|
||||
self._merge_subfragment(subfrag)
|
||||
|
||||
# If we flattened anything, we might be in a situation where we have a driver conflict
|
||||
# again, e.g. if we had a tree of fragments like A --- B --- C where only fragments
|
||||
# A and C were driving a signal S. In that case, since B is not driving S itself,
|
||||
# processing B will not result in any flattening, but since B is transitively driving S,
|
||||
# processing A will flatten B into it. Afterwards, we have a tree like AB --- C, which
|
||||
# has another conflict.
|
||||
if any(flatten_subfrags):
|
||||
# Try flattening again.
|
||||
return self._resolve_hierarchy_conflicts(hierarchy, mode)
|
||||
|
||||
# Nothing was flattened, we're done!
|
||||
return (SignalSet(driver_subfrags.keys()),
|
||||
set(memory_subfrags.keys()))
|
||||
|
||||
def _propagate_domains_up(self, hierarchy=("top",)):
    """Lift non-local clock domains defined by subfragments into this fragment.

    Recurses depth-first. When the same domain is defined by several subfragments,
    the conflicting domains are renamed (``<subfragment>_<domain>``) so they no longer
    collide, which requires every involved subfragment to have a distinct name.

    Raises
    ------
    DomainError
        If a conflicting domain is defined by unnamed subfragments, or by subfragments
        with identical names, making unambiguous renaming impossible.
    """
    from .xfrm import DomainRenamer

    # Maps a domain name to the set of (subfragment, name, index) tuples defining it.
    domain_subfrags = defaultdict(lambda: set())

    # For each domain defined by a subfragment, determine which subfragments define it.
    for i, (subfrag, name) in enumerate(self.subfragments):
        # First, recurse into subfragments and let them propagate domains up as well.
        hier_name = name
        if hier_name is None:
            hier_name = "<unnamed #{}>".format(i)
        subfrag._propagate_domains_up(hierarchy + (hier_name,))

        # Second, classify subfragments by domains they define.
        for domain_name, domain in subfrag.domains.items():
            if domain.local:
                continue
            domain_subfrags[domain_name].add((subfrag, name, i))

    # For each domain defined by more than one subfragment, rename the domain in each
    # of the subfragments such that they no longer conflict.
    for domain_name, subfrags in domain_subfrags.items():
        if len(subfrags) == 1:
            continue

        names = [n for f, n, i in subfrags]
        if not all(names):
            # At least one conflicting subfragment is unnamed; renaming is ambiguous.
            names = sorted("<unnamed #{}>".format(i) if n is None else "'{}'".format(n)
                           for f, n, i in subfrags)
            raise DomainError("Domain '{}' is defined by subfragments {} of fragment '{}'; "
                              "it is necessary to either rename subfragment domains "
                              "explicitly, or give names to subfragments"
                              .format(domain_name, ", ".join(names), ".".join(hierarchy)))

        if len(names) != len(set(names)):
            # Two conflicting subfragments share a name; renaming would still collide.
            names = sorted("#{}".format(i) for f, n, i in subfrags)
            raise DomainError("Domain '{}' is defined by subfragments {} of fragment '{}', "
                              "some of which have identical names; it is necessary to either "
                              "rename subfragment domains explicitly, or give distinct names "
                              "to subfragments"
                              .format(domain_name, ", ".join(names), ".".join(hierarchy)))

        for subfrag, name, i in subfrags:
            domain_name_map = {domain_name: "{}_{}".format(name, domain_name)}
            self.subfragments[i] = (DomainRenamer(domain_name_map)(subfrag), name)

    # Finally, collect the (now unique) subfragment domains, and merge them into our domains.
    for subfrag, name in self.subfragments:
        for domain_name, domain in subfrag.domains.items():
            if domain.local:
                continue
            self.add_domains(domain)
def _propagate_domains_down(self):
    """Ensure every domain known to this fragment also exists in all subfragments.

    Recurses into each subfragment after copying the domains down. If a subfragment
    already has a domain of the same name, it must be the very same ClockDomain object.
    """
    # For each domain defined in this fragment, ensure it also exists in all subfragments.
    for subfrag, name in self.subfragments:
        for domain in self.iter_domains():
            if domain in subfrag.domains:
                # Same name must mean the same domain object, or the design is inconsistent.
                assert self.domains[domain] is subfrag.domains[domain]
            else:
                subfrag.add_domains(self.domains[domain])

        subfrag._propagate_domains_down()
def _create_missing_domains(self, missing_domain, *, platform=None):
    """Create clock domains that are used somewhere in the design but never defined.

    Parameters
    ----------
    missing_domain : callable
        Called with the name of each missing domain. May return a ClockDomain (added
        to this fragment directly), an elaboratable that defines the domain (added as
        a subfragment), or None (treated as an error).
    platform :
        Platform passed through to ``Fragment.get`` when elaborating a returned object.

    Returns
    -------
    list of ClockDomain
        The domains that were added directly and therefore still need ports exposed.

    Raises
    ------
    DomainError
        If a used domain remains undefined, or the callback's fragment does not define
        the requested domain.
    """
    from .xfrm import DomainCollector

    collector = DomainCollector()
    collector(self)

    new_domains = []
    for domain_name in collector.used_domains - collector.defined_domains:
        if domain_name is None:
            continue
        value = missing_domain(domain_name)
        if value is None:
            raise DomainError("Domain '{}' is used but not defined".format(domain_name))
        if type(value) is ClockDomain:
            self.add_domains(value)
            # And expose ports on the newly added clock domain, since it is added directly
            # and there was no chance to add any logic driving it.
            new_domains.append(value)
        else:
            new_fragment = Fragment.get(value, platform=platform)
            if domain_name not in new_fragment.domains:
                defined = new_fragment.domains.keys()
                raise DomainError(
                    "Fragment returned by missing domain callback does not define "
                    "requested domain '{}' (defines {})."
                    .format(domain_name, ", ".join("'{}'".format(n) for n in defined)))
            self.add_subfragment(new_fragment, "cd_{}".format(domain_name))
            self.add_domains(new_fragment.domains.values())
    return new_domains
def _propagate_domains(self, missing_domain, *, platform=None):
    """Run the complete domain propagation pipeline over the fragment hierarchy.

    Returns the list of clock domains created directly by ``missing_domain`` (as
    reported by ``_create_missing_domains``), so the caller can expose ports for them.
    """
    # Lift subfragment domains into this fragment, then push the merged set back down.
    self._propagate_domains_up()
    self._propagate_domains_down()
    # Flatten any hierarchy that would otherwise cause driver conflicts.
    self._resolve_hierarchy_conflicts()
    # Create domains that are used but defined nowhere, then propagate those down too.
    added_domains = self._create_missing_domains(missing_domain, platform=platform)
    self._propagate_domains_down()
    return added_domains
def _prepare_use_def_graph(self, parent, level, uses, defs, ios, top):
    """Recursively populate the use/def/io maps used by ``_propagate_ports``.

    Parameters
    ----------
    parent : dict
        Maps each fragment to its parent fragment (filled in for subfragments).
    level : dict
        Maps each fragment to its depth in the hierarchy.
    uses : SignalDict
        Maps each signal to the set of fragments that read it.
    defs : SignalDict
        Maps each signal to the single fragment that drives it.
    ios : SignalDict
        Maps each signal to the single fragment that uses it bidirectionally.
    top :
        The root fragment of the traversal.
    """
    def add_uses(*sigs, self=self):
        for sig in flatten(sigs):
            if sig not in uses:
                uses[sig] = set()
            uses[sig].add(self)

    def add_defs(*sigs):
        for sig in flatten(sigs):
            if sig not in defs:
                defs[sig] = self
            else:
                # A signal may only be driven by one fragment.
                assert defs[sig] is self

    def add_io(*sigs):
        for sig in flatten(sigs):
            if sig not in ios:
                ios[sig] = self
            else:
                # A signal may only be used bidirectionally by one fragment.
                assert ios[sig] is self

    # Collect all signals we're driving (on LHS of statements), and signals we're using
    # (on RHS of statements, or in clock domains).
    for stmt in self.statements:
        add_uses(stmt._rhs_signals())
        add_defs(stmt._lhs_signals())

    for domain, _ in self.iter_sync():
        cd = self.domains[domain]
        add_uses(cd.clk)
        if cd.rst is not None:
            add_uses(cd.rst)

    # Repeat for subfragments.
    for subfrag, name in self.subfragments:
        if isinstance(subfrag, Instance):
            # Instances are leaves: record their named ports directly instead of recursing.
            for port_name, (value, dir) in subfrag.named_ports.items():
                if dir == "i":
                    # Prioritize defs over uses.
                    rhs_without_outputs = value._rhs_signals() - subfrag.iter_ports(dir="o")
                    subfrag.add_ports(rhs_without_outputs, dir=dir)
                    add_uses(value._rhs_signals())
                if dir == "o":
                    subfrag.add_ports(value._lhs_signals(), dir=dir)
                    add_defs(value._lhs_signals())
                if dir == "io":
                    subfrag.add_ports(value._lhs_signals(), dir=dir)
                    add_io(value._lhs_signals())
        else:
            parent[subfrag] = self
            level[subfrag] = level[self] + 1

            subfrag._prepare_use_def_graph(parent, level, uses, defs, ios, top)
def _propagate_ports(self, ports, all_undef_as_ports):
    """Add input/output/io ports throughout the hierarchy so every use can reach its def.

    Parameters
    ----------
    ports : iterable of Signal
        Signals that must be exposed as ports of this (top) fragment.
    all_undef_as_ports : bool
        If true, every used-but-undriven signal is also treated as a top-level port.
    """
    # Take this fragment graph:
    #
    #    __ B (def: q, use: p r)
    #   /
    #  A (def: p, use: q r)
    #   \
    #    \_ C (def: r, use: p q)
    #
    # We need to consider three cases.
    #   1. Signal p requires an input port in B;
    #   2. Signal r requires an output port in C;
    #   3. Signal r requires an output port in C and an input port in B.
    #
    # Adding these ports can be in general done in three steps for each signal:
    #   1. Find the least common ancestor of all uses and defs.
    #   2. Going upwards from the single def, add output ports.
    #   3. Going upwards from all uses, add input ports.

    parent = {self: None}
    level  = {self: 0}
    uses   = SignalDict()
    defs   = SignalDict()
    ios    = SignalDict()
    self._prepare_use_def_graph(parent, level, uses, defs, ios, self)

    ports = SignalSet(ports)
    if all_undef_as_ports:
        for sig in uses:
            if sig in defs:
                continue
            ports.add(sig)
    # Requested ports count as uses by the top fragment, so the walk below reaches them.
    for sig in ports:
        if sig not in uses:
            uses[sig] = set()
        uses[sig].add(self)

    @memoize
    def lca_of(fragu, fragv):
        """Return the least common ancestor of two fragments in the hierarchy."""
        # Normalize fragu to be deeper than fragv.
        if level[fragu] < level[fragv]:
            fragu, fragv = fragv, fragu
        # Find ancestor of fragu on the same level as fragv.
        for _ in range(level[fragu] - level[fragv]):
            fragu = parent[fragu]
        # If fragv was the ancestor of fragu, we're done.
        if fragu == fragv:
            return fragu
        # Otherwise, they are at the same level but in different branches. Step both fragu
        # and fragv until we find the common ancestor.
        while parent[fragu] != parent[fragv]:
            fragu = parent[fragu]
            fragv = parent[fragv]
        return parent[fragu]

    for sig in uses:
        if sig in defs:
            lca = reduce(lca_of, uses[sig], defs[sig])
        else:
            lca = reduce(lca_of, uses[sig])

        # Going upwards from each use (except the def itself), add input ports.
        for frag in uses[sig]:
            if sig in defs and frag is defs[sig]:
                continue
            while frag != lca:
                frag.add_ports(sig, dir="i")
                frag = parent[frag]

        # Going upwards from the def, add output ports.
        if sig in defs:
            frag = defs[sig]
            while frag != lca:
                frag.add_ports(sig, dir="o")
                frag = parent[frag]

    # Bidirectional signals are exposed as io ports all the way up to the top.
    for sig in ios:
        frag = ios[sig]
        while frag is not None:
            frag.add_ports(sig, dir="io")
            frag = parent[frag]

    # Finally, expose the requested ports on the top fragment itself.
    for sig in ports:
        if sig in ios:
            continue
        if sig in defs:
            self.add_ports(sig, dir="o")
        else:
            self.add_ports(sig, dir="i")
def prepare(self, ports=None, missing_domain=lambda name: ClockDomain(name)):
    """Lower and finalize this fragment for a backend.

    Lowers sampled values and clock domains, resolves and creates domains, and
    propagates ports through the hierarchy.

    Parameters
    ----------
    ports : list or tuple of Signal, or None
        Signals to expose as top-level ports. If None, every used-but-undriven signal
        becomes a port instead.
    missing_domain : callable
        Callback invoked for each used-but-undefined domain; by default, creates
        a fresh ClockDomain of the same name.

    Returns
    -------
    Fragment
        A new, fully prepared fragment; ``self`` is not modified.

    Raises
    ------
    TypeError
        If ``ports`` is not a list/tuple, or contains non-signal objects.
    """
    from .xfrm import SampleLowerer, DomainLowerer

    fragment = SampleLowerer()(self)
    new_domains = fragment._propagate_domains(missing_domain)
    fragment = DomainLowerer()(fragment)
    if ports is None:
        fragment._propagate_ports(ports=(), all_undef_as_ports=True)
    else:
        if not isinstance(ports, tuple) and not isinstance(ports, list):
            msg = "`ports` must be either a list or a tuple, not {!r}"\
                .format(ports)
            if isinstance(ports, Value):
                msg += " (did you mean `ports=(<signal>,)`, rather than `ports=<signal>`?)"
            raise TypeError(msg)
        mapped_ports = []
        # Lower late bound signals like ClockSignal() to ports.
        port_lowerer = DomainLowerer(fragment.domains)
        for port in ports:
            if not isinstance(port, (Signal, ClockSignal, ResetSignal)):
                raise TypeError("Only signals may be added as ports, not {!r}"
                                .format(port))
            mapped_ports.append(port_lowerer.on_value(port))
        # Add ports for all newly created missing clock domains, since not doing so defeats
        # the purpose of domain auto-creation. (It's possible to refer to these ports before
        # the domain actually exists through late binding, but it's inconvenient.)
        for cd in new_domains:
            mapped_ports.append(cd.clk)
            if cd.rst is not None:
                mapped_ports.append(cd.rst)
        fragment._propagate_ports(ports=mapped_ports, all_undef_as_ports=False)
    return fragment
class Instance(Fragment):
    """A fragment that instantiates an external ("black box") cell.

    Parameters
    ----------
    type : str
        Type (cell name) of the instance.
    *args : tuple of (kind, name, value)
        Positional specification of attributes (kind ``"a"``), parameters (``"p"``),
        and named ports (``"i"``, ``"o"``, ``"io"``).
    **kwargs
        Keyword specification of the same, using the ``a_``, ``p_``, ``i_``, ``o_``,
        and ``io_`` name prefixes.

    Raises
    ------
    NameError
        If an argument does not match any of the accepted kinds or prefixes.
    """
    def __init__(self, type, *args, **kwargs):
        super().__init__()

        self.type = type
        self.parameters = OrderedDict()
        self.named_ports = OrderedDict()

        for (kind, name, value) in args:
            if kind == "a":
                self.attrs[name] = value
            elif kind == "p":
                self.parameters[name] = value
            elif kind in ("i", "o", "io"):
                # Port values are cast eagerly so later passes see plain Values.
                self.named_ports[name] = (Value.cast(value), kind)
            else:
                raise NameError("Instance argument {!r} should be a tuple (kind, name, value) "
                                "where kind is one of \"a\", \"p\", \"i\", \"o\", or \"io\""
                                .format((kind, name, value)))

        for kw, arg in kwargs.items():
            if kw.startswith("a_"):
                self.attrs[kw[2:]] = arg
            elif kw.startswith("p_"):
                self.parameters[kw[2:]] = arg
            elif kw.startswith("i_"):
                self.named_ports[kw[2:]] = (Value.cast(arg), "i")
            elif kw.startswith("o_"):
                self.named_ports[kw[2:]] = (Value.cast(arg), "o")
            elif kw.startswith("io_"):
                self.named_ports[kw[3:]] = (Value.cast(arg), "io")
            else:
                raise NameError("Instance keyword argument {}={!r} does not start with one of "
                                "\"a_\", \"p_\", \"i_\", \"o_\", or \"io_\""
                                .format(kw, arg))
warnings.warn("instead of nmigen.hdl.ir, use amaranth.hdl.ir",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,322 +1,7 @@
|
|||
import operator
|
||||
from collections import OrderedDict
|
||||
|
||||
from .. import tracer
|
||||
from .ast import *
|
||||
from .ir import Elaboratable, Instance
|
||||
from amaranth.hdl.mem import *
|
||||
from amaranth.hdl.mem import __all__
|
||||
|
||||
|
||||
__all__ = ["Memory", "ReadPort", "WritePort", "DummyPort"]
|
||||
|
||||
|
||||
class Memory:
    """A word addressable storage.

    Parameters
    ----------
    width : int
        Access granularity. Each storage element of this memory is ``width`` bits in size.
    depth : int
        Word count. This memory contains ``depth`` storage elements.
    init : list of int
        Initial values. At power on, each storage element in this memory is initialized to
        the corresponding element of ``init``, if any, or to zero otherwise.
        Uninitialized memories are not currently supported.
    name : str
        Name hint for this memory. If ``None`` (default) the name is inferred from the variable
        name this ``Signal`` is assigned to.
    attrs : dict
        Dictionary of synthesis attributes.

    Attributes
    ----------
    width : int
    depth : int
    init : list of int
    attrs : dict
    """
    def __init__(self, *, width, depth, init=None, name=None, attrs=None, simulate=True):
        if not isinstance(width, int) or width < 0:
            raise TypeError("Memory width must be a non-negative integer, not {!r}"
                            .format(width))
        if not isinstance(depth, int) or depth < 0:
            raise TypeError("Memory depth must be a non-negative integer, not {!r}"
                            .format(depth))

        self.name = name or tracer.get_var_name(depth=2, default="$memory")
        self.src_loc = tracer.get_src_loc()

        self.width = width
        self.depth = depth
        self.attrs = OrderedDict(() if attrs is None else attrs)

        # Array of signals for simulation.
        self._array = Array()
        if simulate:
            for addr in range(self.depth):
                self._array.append(Signal(self.width, name="{}({})"
                                          .format(name or "memory", addr)))

        self.init = init

    @property
    def init(self):
        """list of int: Initial contents; see the class docstring."""
        return self._init

    @init.setter
    def init(self, new_init):
        self._init = [] if new_init is None else list(new_init)
        if len(self.init) > self.depth:
            raise ValueError("Memory initialization value count exceed memory depth ({} > {})"
                             .format(len(self.init), self.depth))

        try:
            # Keep the simulation array in sync: initialized words get their init value,
            # the rest are reset to zero.
            for addr in range(len(self._array)):
                if addr < len(self._init):
                    self._array[addr].reset = operator.index(self._init[addr])
                else:
                    self._array[addr].reset = 0
        except TypeError as e:
            raise TypeError("Memory initialization value at address {:x}: {}"
                            .format(addr, e)) from None

    def read_port(self, *, src_loc_at=0, **kwargs):
        """Get a read port.

        See :class:`ReadPort` for details.

        Arguments
        ---------
        domain : str
        transparent : bool

        Returns
        -------
        An instance of :class:`ReadPort` associated with this memory.
        """
        return ReadPort(self, src_loc_at=1 + src_loc_at, **kwargs)

    def write_port(self, *, src_loc_at=0, **kwargs):
        """Get a write port.

        See :class:`WritePort` for details.

        Arguments
        ---------
        domain : str
        granularity : int

        Returns
        -------
        An instance of :class:`WritePort` associated with this memory.
        """
        return WritePort(self, src_loc_at=1 + src_loc_at, **kwargs)

    def __getitem__(self, index):
        """Simulation only."""
        return self._array[index]
class ReadPort(Elaboratable):
    """A memory read port.

    Parameters
    ----------
    memory : :class:`Memory`
        Memory associated with the port.
    domain : str
        Clock domain. Defaults to ``"sync"``. If set to ``"comb"``, the port is asynchronous.
        Otherwise, the read data becomes available on the next clock cycle.
    transparent : bool
        Port transparency. If set (default), a read at an address that is also being written to in
        the same clock cycle will output the new value. Otherwise, the old value will be output
        first. This behavior only applies to ports in the same domain.

    Attributes
    ----------
    memory : :class:`Memory`
    domain : str
    transparent : bool
    addr : Signal(range(memory.depth)), in
        Read address.
    data : Signal(memory.width), out
        Read data.
    en : Signal or Const, in
        Read enable. If asserted, ``data`` is updated with the word stored at ``addr``. Note that
        transparent ports cannot assign ``en`` (which is hardwired to 1 instead), as doing so is
        currently not supported by Yosys.

    Exceptions
    ----------
    Raises :exn:`ValueError` if the read port is simultaneously asynchronous and non-transparent.
    """
    def __init__(self, memory, *, domain="sync", transparent=True, src_loc_at=0):
        if domain == "comb" and not transparent:
            raise ValueError("Read port cannot be simultaneously asynchronous and non-transparent")

        self.memory = memory
        self.domain = domain
        self.transparent = transparent

        self.addr = Signal(range(memory.depth),
                           name="{}_r_addr".format(memory.name), src_loc_at=1 + src_loc_at)
        self.data = Signal(memory.width,
                           name="{}_r_data".format(memory.name), src_loc_at=1 + src_loc_at)
        if self.domain != "comb" and not transparent:
            # Only synchronous, non-transparent ports have a controllable enable.
            self.en = Signal(name="{}_r_en".format(memory.name), reset=1,
                             src_loc_at=1 + src_loc_at)
        else:
            self.en = Const(1)

    def elaborate(self, platform):
        """Lower the port to a ``$memrd`` instance plus a simulation model."""
        f = Instance("$memrd",
            p_MEMID=self.memory,
            p_ABITS=self.addr.width,
            p_WIDTH=self.data.width,
            p_CLK_ENABLE=self.domain != "comb",
            p_CLK_POLARITY=1,
            p_TRANSPARENT=self.transparent,
            i_CLK=ClockSignal(self.domain) if self.domain != "comb" else Const(0),
            i_EN=self.en,
            i_ADDR=self.addr,
            o_DATA=self.data,
        )
        if self.domain == "comb":
            # Asynchronous port
            f.add_statements(self.data.eq(self.memory._array[self.addr]))
            f.add_driver(self.data)
        elif not self.transparent:
            # Synchronous, read-before-write port
            f.add_statements(
                Switch(self.en, {
                    1: self.data.eq(self.memory._array[self.addr])
                })
            )
            f.add_driver(self.data, self.domain)
        else:
            # Synchronous, write-through port
            # This model is a bit unconventional. We model transparent ports as asynchronous ports
            # that are latched when the clock is high. This isn't exactly correct, but it is very
            # close to the correct behavior of a transparent port, and the difference should only
            # be observable in pathological cases of clock gating. A register is injected to
            # the address input to achieve the correct address-to-data latency. Also, the reset
            # value of the data output is forcibly set to the 0th initial value, if any--note that
            # many FPGAs do not guarantee this behavior!
            if len(self.memory.init) > 0:
                self.data.reset = operator.index(self.memory.init[0])
            latch_addr = Signal.like(self.addr)
            f.add_statements(
                latch_addr.eq(self.addr),
                Switch(ClockSignal(self.domain), {
                    0: self.data.eq(self.data),
                    1: self.data.eq(self.memory._array[latch_addr]),
                }),
            )
            f.add_driver(latch_addr, self.domain)
            f.add_driver(self.data)
        return f
class WritePort(Elaboratable):
    """A memory write port.

    Parameters
    ----------
    memory : :class:`Memory`
        Memory associated with the port.
    domain : str
        Clock domain. Defaults to ``"sync"``. Writes have a latency of 1 clock cycle.
    granularity : int
        Port granularity. Defaults to ``memory.width``. Write data is split evenly in
        ``memory.width // granularity`` chunks, which can be updated independently.

    Attributes
    ----------
    memory : :class:`Memory`
    domain : str
    granularity : int
    addr : Signal(range(memory.depth)), in
        Write address.
    data : Signal(memory.width), in
        Write data.
    en : Signal(memory.width // granularity), in
        Write enable. Each bit selects a non-overlapping chunk of ``granularity`` bits on the
        ``data`` signal, which is written to memory at ``addr``. Unselected chunks are ignored.

    Exceptions
    ----------
    Raises :exn:`ValueError` if the write port granularity is greater than memory width, or does not
    divide memory width evenly.
    """
    def __init__(self, memory, *, domain="sync", granularity=None, src_loc_at=0):
        if granularity is None:
            granularity = memory.width
        if not isinstance(granularity, int) or granularity < 0:
            raise TypeError("Write port granularity must be a non-negative integer, not {!r}"
                            .format(granularity))
        if granularity > memory.width:
            raise ValueError("Write port granularity must not be greater than memory width "
                             "({} > {})"
                             .format(granularity, memory.width))
        if memory.width // granularity * granularity != memory.width:
            raise ValueError("Write port granularity must divide memory width evenly")

        self.memory = memory
        self.domain = domain
        self.granularity = granularity

        self.addr = Signal(range(memory.depth),
                           name="{}_w_addr".format(memory.name), src_loc_at=1 + src_loc_at)
        self.data = Signal(memory.width,
                           name="{}_w_data".format(memory.name), src_loc_at=1 + src_loc_at)
        self.en = Signal(memory.width // granularity,
                         name="{}_w_en".format(memory.name), src_loc_at=1 + src_loc_at)

    def elaborate(self, platform):
        """Lower the port to a ``$memwr`` instance plus a simulation model."""
        f = Instance("$memwr",
            p_MEMID=self.memory,
            p_ABITS=self.addr.width,
            p_WIDTH=self.data.width,
            p_CLK_ENABLE=1,
            p_CLK_POLARITY=1,
            p_PRIORITY=0,
            i_CLK=ClockSignal(self.domain),
            # Each enable bit is replicated over its granularity-sized chunk.
            i_EN=Cat(Repl(en_bit, self.granularity) for en_bit in self.en),
            i_ADDR=self.addr,
            i_DATA=self.data,
        )
        if len(self.en) > 1:
            # Partial writes: each enable bit guards its own chunk of the word.
            for index, en_bit in enumerate(self.en):
                offset = index * self.granularity
                bits = slice(offset, offset + self.granularity)
                write_data = self.memory._array[self.addr][bits].eq(self.data[bits])
                f.add_statements(Switch(en_bit, { 1: write_data }))
        else:
            write_data = self.memory._array[self.addr].eq(self.data)
            f.add_statements(Switch(self.en, { 1: write_data }))
        for signal in self.memory._array:
            f.add_driver(signal, self.domain)
        return f
class DummyPort:
    """A stand-in memory port for testing and verification.

    Substitutes for either a :class:`ReadPort` or a :class:`WritePort`. Only the
    attributes common to both (``domain``, ``addr``, ``data``, ``en``) are created;
    any port-specific attribute may be attached manually afterwards.
    """
    def __init__(self, *, data_width, addr_width, domain="sync", name=None, granularity=None):
        self.domain = domain

        # Fall back to the caller's variable name, like real ports do.
        if name is None:
            name = tracer.get_var_name(depth=2, default="dummy")
        # A port covers the whole word unless a finer granularity is requested.
        if granularity is None:
            granularity = data_width

        self.addr = Signal(addr_width, name="{}_addr".format(name), src_loc_at=1)
        self.data = Signal(data_width, name="{}_data".format(name), src_loc_at=1)
        self.en   = Signal(data_width // granularity, name="{}_en".format(name), src_loc_at=1)
import warnings
|
||||
warnings.warn("instead of nmigen.hdl.mem, use amaranth.hdl.mem",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,278 +1,7 @@
|
|||
from enum import Enum
|
||||
from collections import OrderedDict
|
||||
from functools import reduce, wraps
|
||||
|
||||
from .. import tracer
|
||||
from .._utils import union
|
||||
from .ast import *
|
||||
from amaranth.hdl.rec import *
|
||||
from amaranth.hdl.rec import __all__
|
||||
|
||||
|
||||
__all__ = ["Direction", "DIR_NONE", "DIR_FANOUT", "DIR_FANIN", "Layout", "Record"]
|
||||
|
||||
|
||||
Direction = Enum('Direction', ('NONE', 'FANOUT', 'FANIN'))
|
||||
|
||||
DIR_NONE = Direction.NONE
|
||||
DIR_FANOUT = Direction.FANOUT
|
||||
DIR_FANIN = Direction.FANIN
|
||||
|
||||
|
||||
class Layout:
    """An ordered collection of named, shaped, optionally directed record fields.

    Each field is a ``(name, shape)`` or ``(name, shape, direction)`` tuple, where
    ``shape`` is anything castable to :class:`Shape` or a nested list of fields.
    """
    @staticmethod
    def cast(obj, *, src_loc_at=0):
        """Convert ``obj`` to a Layout, passing existing Layouts through unchanged."""
        if isinstance(obj, Layout):
            return obj
        return Layout(obj, src_loc_at=1 + src_loc_at)

    def __init__(self, fields, *, src_loc_at=0):
        self.fields = OrderedDict()
        for field in fields:
            if not isinstance(field, tuple) or len(field) not in (2, 3):
                raise TypeError("Field {!r} has invalid layout: should be either "
                                "(name, shape) or (name, shape, direction)"
                                .format(field))
            if len(field) == 2:
                name, shape = field
                direction = DIR_NONE
                if isinstance(shape, list):
                    # A nested list of fields describes a sub-record.
                    shape = Layout.cast(shape)
            else:
                name, shape, direction = field
                if not isinstance(direction, Direction):
                    raise TypeError("Field {!r} has invalid direction: should be a Direction "
                                    "instance like DIR_FANIN"
                                    .format(field))
            if not isinstance(name, str):
                raise TypeError("Field {!r} has invalid name: should be a string"
                                .format(field))
            if not isinstance(shape, Layout):
                try:
                    # Check provided shape by calling Shape.cast and checking for exception
                    Shape.cast(shape, src_loc_at=1 + src_loc_at)
                except Exception:
                    raise TypeError("Field {!r} has invalid shape: should be castable to Shape "
                                    "or a list of fields of a nested record"
                                    .format(field))
            if name in self.fields:
                raise NameError("Field {!r} has a name that is already present in the layout"
                                .format(field))
            self.fields[name] = (shape, direction)

    def __getitem__(self, item):
        """Return the ``(shape, direction)`` of one field, or a sub-Layout for a tuple of names."""
        if isinstance(item, tuple):
            return Layout([
                (name, shape, dir)
                for (name, (shape, dir)) in self.fields.items()
                if name in item
            ])

        return self.fields[item]

    def __iter__(self):
        """Yield ``(name, shape, direction)`` for each field, in declaration order."""
        for name, (shape, dir) in self.fields.items():
            yield (name, shape, dir)

    def __eq__(self, other):
        return self.fields == other.fields

    def __repr__(self):
        field_reprs = []
        for name, shape, dir in self:
            if dir == DIR_NONE:
                field_reprs.append("({!r}, {!r})".format(name, shape))
            else:
                field_reprs.append("({!r}, {!r}, Direction.{})".format(name, shape, dir.name))
        return "Layout([{}])".format(", ".join(field_reprs))
class Record(ValueCastable):
    """A value composed of named fields described by a :class:`Layout`.

    Fields are accessible both as attributes and by subscription; the record as a
    whole casts to the concatenation of its fields.
    """
    @staticmethod
    def like(other, *, name=None, name_suffix=None, src_loc_at=0):
        """Create a new Record with the same layout as ``other`` but fresh signals."""
        if name is not None:
            new_name = str(name)
        elif name_suffix is not None:
            new_name = other.name + str(name_suffix)
        else:
            new_name = tracer.get_var_name(depth=2 + src_loc_at, default=None)

        def concat(a, b):
            if a is None:
                return b
            return "{}__{}".format(a, b)

        fields = {}
        for field_name in other.fields:
            field = other[field_name]
            if isinstance(field, Record):
                fields[field_name] = Record.like(field, name=concat(new_name, field_name),
                                                 src_loc_at=1 + src_loc_at)
            else:
                fields[field_name] = Signal.like(field, name=concat(new_name, field_name),
                                                 src_loc_at=1 + src_loc_at)

        return Record(other.layout, name=new_name, fields=fields, src_loc_at=1)

    def __init__(self, layout, *, name=None, fields=None, src_loc_at=0):
        if name is None:
            name = tracer.get_var_name(depth=2 + src_loc_at, default=None)

        self.name = name
        self.src_loc = tracer.get_src_loc(src_loc_at)

        def concat(a, b):
            if a is None:
                return b
            return "{}__{}".format(a, b)

        self.layout = Layout.cast(layout, src_loc_at=1 + src_loc_at)
        self.fields = OrderedDict()
        for field_name, field_shape, field_dir in self.layout:
            if fields is not None and field_name in fields:
                # Reuse the provided field, after checking it matches the layout.
                field = fields[field_name]
                if isinstance(field_shape, Layout):
                    assert isinstance(field, Record) and field_shape == field.layout
                else:
                    assert isinstance(field, Signal) and Shape.cast(field_shape) == field.shape()
                self.fields[field_name] = field
            else:
                # Create a fresh sub-record or signal for this field.
                if isinstance(field_shape, Layout):
                    self.fields[field_name] = Record(field_shape, name=concat(name, field_name),
                                                     src_loc_at=1 + src_loc_at)
                else:
                    self.fields[field_name] = Signal(field_shape, name=concat(name, field_name),
                                                     src_loc_at=1 + src_loc_at)

    def __getattr__(self, name):
        # Attribute access falls through to field lookup.
        return self[name]

    def __getitem__(self, item):
        """Return a field by name, a sub-record for a tuple of names, or a bit-slice."""
        if isinstance(item, str):
            try:
                return self.fields[item]
            except KeyError:
                if self.name is None:
                    reference = "Unnamed record"
                else:
                    reference = "Record '{}'".format(self.name)
                raise AttributeError("{} does not have a field '{}'. Did you mean one of: {}?"
                                     .format(reference, item, ", ".join(self.fields))) from None
        elif isinstance(item, tuple):
            return Record(self.layout[item], fields={
                field_name: field_value
                for field_name, field_value in self.fields.items()
                if field_name in item
            })
        else:
            # Integer/slice subscripts index into the record as a plain Value.
            try:
                return Value.__getitem__(self, item)
            except KeyError:
                if self.name is None:
                    reference = "Unnamed record"
                else:
                    reference = "Record '{}'".format(self.name)
                raise AttributeError("{} does not have a field '{}'. Did you mean one of: {}?"
                                     .format(reference, item, ", ".join(self.fields))) from None

    @ValueCastable.lowermethod
    def as_value(self):
        """Lower the record to the concatenation of all of its fields."""
        return Cat(self.fields.values())

    def __len__(self):
        return len(self.as_value())

    def _lhs_signals(self):
        return union((f._lhs_signals() for f in self.fields.values()), start=SignalSet())

    def _rhs_signals(self):
        return union((f._rhs_signals() for f in self.fields.values()), start=SignalSet())

    def __repr__(self):
        fields = []
        for field_name, field in self.fields.items():
            if isinstance(field, Signal):
                fields.append(field_name)
            else:
                fields.append(repr(field))
        name = self.name
        if name is None:
            name = "<unnamed>"
        return "(rec {} {})".format(name, " ".join(fields))

    def shape(self):
        return self.as_value().shape()

    def connect(self, *subordinates, include=None, exclude=None):
        """Return statements connecting this record's directed fields to ``subordinates``.

        Fanout fields drive the subordinates; fanin fields are the OR of the
        subordinates' fields. ``include``/``exclude`` restrict which fields (and,
        via nested dicts, which sub-record fields) participate.

        Raises AttributeError for unknown include/exclude fields or missing
        subordinate fields, and TypeError for undirected fields.
        """
        def rec_name(record):
            if record.name is None:
                return "unnamed record"
            else:
                return "record '{}'".format(record.name)

        for field in include or {}:
            if field not in self.fields:
                raise AttributeError("Cannot include field '{}' because it is not present in {}"
                                     .format(field, rec_name(self)))
        for field in exclude or {}:
            if field not in self.fields:
                raise AttributeError("Cannot exclude field '{}' because it is not present in {}"
                                     .format(field, rec_name(self)))

        stmts = []
        for field in self.fields:
            if include is not None and field not in include:
                continue
            if exclude is not None and field in exclude:
                continue

            shape, direction = self.layout[field]
            if not isinstance(shape, Layout) and direction == DIR_NONE:
                raise TypeError("Cannot connect field '{}' of {} because it does not have "
                                "a direction"
                                .format(field, rec_name(self)))

            item = self.fields[field]
            subord_items = []
            for subord in subordinates:
                if field not in subord.fields:
                    raise AttributeError("Cannot connect field '{}' of {} to subordinate {} "
                                         "because the subordinate record does not have this field"
                                         .format(field, rec_name(self), rec_name(subord)))
                subord_items.append(subord.fields[field])

            if isinstance(shape, Layout):
                # Recurse into sub-records, narrowing include/exclude to this field.
                sub_include = include[field] if include and field in include else None
                sub_exclude = exclude[field] if exclude and field in exclude else None
                stmts += item.connect(*subord_items, include=sub_include, exclude=sub_exclude)
            else:
                if direction == DIR_FANOUT:
                    stmts += [sub_item.eq(item) for sub_item in subord_items]
                if direction == DIR_FANIN:
                    stmts += [item.eq(reduce(lambda a, b: a | b, subord_items))]

        return stmts
def _valueproxy(name):
|
||||
value_func = getattr(Value, name)
|
||||
@wraps(value_func)
|
||||
def _wrapper(self, *args, **kwargs):
|
||||
return value_func(Value.cast(self), *args, **kwargs)
|
||||
return _wrapper
|
||||
|
||||
for name in [
|
||||
"__bool__",
|
||||
"__invert__", "__neg__",
|
||||
"__add__", "__radd__", "__sub__", "__rsub__",
|
||||
"__mul__", "__rmul__",
|
||||
"__mod__", "__rmod__", "__floordiv__", "__rfloordiv__",
|
||||
"__lshift__", "__rlshift__", "__rshift__", "__rrshift__",
|
||||
"__and__", "__rand__", "__xor__", "__rxor__", "__or__", "__ror__",
|
||||
"__eq__", "__ne__", "__lt__", "__le__", "__gt__", "__ge__",
|
||||
"__abs__", "__len__",
|
||||
"as_unsigned", "as_signed", "bool", "any", "all", "xor", "implies",
|
||||
"bit_select", "word_select", "matches",
|
||||
"shift_left", "shift_right", "rotate_left", "rotate_right", "eq"
|
||||
]:
|
||||
setattr(Record, name, _valueproxy(name))
|
||||
|
||||
del _valueproxy
|
||||
del name
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.hdl.rec, use amaranth.hdl.rec",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,743 +1,7 @@
|
|||
from abc import ABCMeta, abstractmethod
|
||||
from collections import OrderedDict
|
||||
from collections.abc import Iterable
|
||||
from amaranth.hdl.xfrm import *
|
||||
from amaranth.hdl.xfrm import __all__
|
||||
|
||||
from .._utils import flatten
|
||||
from .. import tracer
|
||||
from .ast import *
|
||||
from .ast import _StatementList
|
||||
from .cd import *
|
||||
from .ir import *
|
||||
from .rec import *
|
||||
|
||||
|
||||
__all__ = ["ValueVisitor", "ValueTransformer",
|
||||
"StatementVisitor", "StatementTransformer",
|
||||
"FragmentTransformer",
|
||||
"TransformedElaboratable",
|
||||
"DomainCollector", "DomainRenamer", "DomainLowerer",
|
||||
"SampleDomainInjector", "SampleLowerer",
|
||||
"SwitchCleaner", "LHSGroupAnalyzer", "LHSGroupFilter",
|
||||
"ResetInserter", "EnableInserter"]
|
||||
|
||||
|
||||
class ValueVisitor(metaclass=ABCMeta):
|
||||
@abstractmethod
|
||||
def on_Const(self, value):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_AnyConst(self, value):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_AnySeq(self, value):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_Signal(self, value):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_ClockSignal(self, value):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_ResetSignal(self, value):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_Operator(self, value):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_Slice(self, value):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_Part(self, value):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_Cat(self, value):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_Repl(self, value):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_ArrayProxy(self, value):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_Sample(self, value):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_Initial(self, value):
|
||||
pass # :nocov:
|
||||
|
||||
def on_unknown_value(self, value):
|
||||
raise TypeError("Cannot transform value {!r}".format(value)) # :nocov:
|
||||
|
||||
def replace_value_src_loc(self, value, new_value):
|
||||
return True
|
||||
|
||||
def on_value(self, value):
|
||||
if type(value) is Const:
|
||||
new_value = self.on_Const(value)
|
||||
elif type(value) is AnyConst:
|
||||
new_value = self.on_AnyConst(value)
|
||||
elif type(value) is AnySeq:
|
||||
new_value = self.on_AnySeq(value)
|
||||
elif isinstance(value, Signal):
|
||||
# Uses `isinstance()` and not `type() is` because nmigen.compat requires it.
|
||||
new_value = self.on_Signal(value)
|
||||
elif type(value) is ClockSignal:
|
||||
new_value = self.on_ClockSignal(value)
|
||||
elif type(value) is ResetSignal:
|
||||
new_value = self.on_ResetSignal(value)
|
||||
elif type(value) is Operator:
|
||||
new_value = self.on_Operator(value)
|
||||
elif type(value) is Slice:
|
||||
new_value = self.on_Slice(value)
|
||||
elif type(value) is Part:
|
||||
new_value = self.on_Part(value)
|
||||
elif type(value) is Cat:
|
||||
new_value = self.on_Cat(value)
|
||||
elif type(value) is Repl:
|
||||
new_value = self.on_Repl(value)
|
||||
elif type(value) is ArrayProxy:
|
||||
new_value = self.on_ArrayProxy(value)
|
||||
elif type(value) is Sample:
|
||||
new_value = self.on_Sample(value)
|
||||
elif type(value) is Initial:
|
||||
new_value = self.on_Initial(value)
|
||||
elif isinstance(value, UserValue):
|
||||
# Uses `isinstance()` and not `type() is` to allow inheriting.
|
||||
new_value = self.on_value(value._lazy_lower())
|
||||
else:
|
||||
new_value = self.on_unknown_value(value)
|
||||
if isinstance(new_value, Value) and self.replace_value_src_loc(value, new_value):
|
||||
new_value.src_loc = value.src_loc
|
||||
return new_value
|
||||
|
||||
def __call__(self, value):
|
||||
return self.on_value(value)
|
||||
|
||||
|
||||
class ValueTransformer(ValueVisitor):
|
||||
def on_Const(self, value):
|
||||
return value
|
||||
|
||||
def on_AnyConst(self, value):
|
||||
return value
|
||||
|
||||
def on_AnySeq(self, value):
|
||||
return value
|
||||
|
||||
def on_Signal(self, value):
|
||||
return value
|
||||
|
||||
def on_ClockSignal(self, value):
|
||||
return value
|
||||
|
||||
def on_ResetSignal(self, value):
|
||||
return value
|
||||
|
||||
def on_Operator(self, value):
|
||||
return Operator(value.operator, [self.on_value(o) for o in value.operands])
|
||||
|
||||
def on_Slice(self, value):
|
||||
return Slice(self.on_value(value.value), value.start, value.stop)
|
||||
|
||||
def on_Part(self, value):
|
||||
return Part(self.on_value(value.value), self.on_value(value.offset),
|
||||
value.width, value.stride)
|
||||
|
||||
def on_Cat(self, value):
|
||||
return Cat(self.on_value(o) for o in value.parts)
|
||||
|
||||
def on_Repl(self, value):
|
||||
return Repl(self.on_value(value.value), value.count)
|
||||
|
||||
def on_ArrayProxy(self, value):
|
||||
return ArrayProxy([self.on_value(elem) for elem in value._iter_as_values()],
|
||||
self.on_value(value.index))
|
||||
|
||||
def on_Sample(self, value):
|
||||
return Sample(self.on_value(value.value), value.clocks, value.domain)
|
||||
|
||||
def on_Initial(self, value):
|
||||
return value
|
||||
|
||||
|
||||
class StatementVisitor(metaclass=ABCMeta):
|
||||
@abstractmethod
|
||||
def on_Assign(self, stmt):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_Assert(self, stmt):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_Assume(self, stmt):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_Cover(self, stmt):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_Switch(self, stmt):
|
||||
pass # :nocov:
|
||||
|
||||
@abstractmethod
|
||||
def on_statements(self, stmts):
|
||||
pass # :nocov:
|
||||
|
||||
def on_unknown_statement(self, stmt):
|
||||
raise TypeError("Cannot transform statement {!r}".format(stmt)) # :nocov:
|
||||
|
||||
def replace_statement_src_loc(self, stmt, new_stmt):
|
||||
return True
|
||||
|
||||
def on_statement(self, stmt):
|
||||
if type(stmt) is Assign:
|
||||
new_stmt = self.on_Assign(stmt)
|
||||
elif type(stmt) is Assert:
|
||||
new_stmt = self.on_Assert(stmt)
|
||||
elif type(stmt) is Assume:
|
||||
new_stmt = self.on_Assume(stmt)
|
||||
elif type(stmt) is Cover:
|
||||
new_stmt = self.on_Cover(stmt)
|
||||
elif isinstance(stmt, Switch):
|
||||
# Uses `isinstance()` and not `type() is` because nmigen.compat requires it.
|
||||
new_stmt = self.on_Switch(stmt)
|
||||
elif isinstance(stmt, Iterable):
|
||||
new_stmt = self.on_statements(stmt)
|
||||
else:
|
||||
new_stmt = self.on_unknown_statement(stmt)
|
||||
if isinstance(new_stmt, Statement) and self.replace_statement_src_loc(stmt, new_stmt):
|
||||
new_stmt.src_loc = stmt.src_loc
|
||||
if isinstance(new_stmt, Switch) and isinstance(stmt, Switch):
|
||||
new_stmt.case_src_locs = stmt.case_src_locs
|
||||
if isinstance(new_stmt, Property):
|
||||
new_stmt._MustUse__used = True
|
||||
return new_stmt
|
||||
|
||||
def __call__(self, stmt):
|
||||
return self.on_statement(stmt)
|
||||
|
||||
|
||||
class StatementTransformer(StatementVisitor):
|
||||
def on_value(self, value):
|
||||
return value
|
||||
|
||||
def on_Assign(self, stmt):
|
||||
return Assign(self.on_value(stmt.lhs), self.on_value(stmt.rhs))
|
||||
|
||||
def on_Assert(self, stmt):
|
||||
return Assert(self.on_value(stmt.test), _check=stmt._check, _en=stmt._en)
|
||||
|
||||
def on_Assume(self, stmt):
|
||||
return Assume(self.on_value(stmt.test), _check=stmt._check, _en=stmt._en)
|
||||
|
||||
def on_Cover(self, stmt):
|
||||
return Cover(self.on_value(stmt.test), _check=stmt._check, _en=stmt._en)
|
||||
|
||||
def on_Switch(self, stmt):
|
||||
cases = OrderedDict((k, self.on_statement(s)) for k, s in stmt.cases.items())
|
||||
return Switch(self.on_value(stmt.test), cases)
|
||||
|
||||
def on_statements(self, stmts):
|
||||
return _StatementList(flatten(self.on_statement(stmt) for stmt in stmts))
|
||||
|
||||
|
||||
class FragmentTransformer:
|
||||
def map_subfragments(self, fragment, new_fragment):
|
||||
for subfragment, name in fragment.subfragments:
|
||||
new_fragment.add_subfragment(self(subfragment), name)
|
||||
|
||||
def map_ports(self, fragment, new_fragment):
|
||||
for port, dir in fragment.ports.items():
|
||||
new_fragment.add_ports(port, dir=dir)
|
||||
|
||||
def map_named_ports(self, fragment, new_fragment):
|
||||
if hasattr(self, "on_value"):
|
||||
for name, (value, dir) in fragment.named_ports.items():
|
||||
new_fragment.named_ports[name] = self.on_value(value), dir
|
||||
else:
|
||||
new_fragment.named_ports = OrderedDict(fragment.named_ports.items())
|
||||
|
||||
def map_domains(self, fragment, new_fragment):
|
||||
for domain in fragment.iter_domains():
|
||||
new_fragment.add_domains(fragment.domains[domain])
|
||||
|
||||
def map_statements(self, fragment, new_fragment):
|
||||
if hasattr(self, "on_statement"):
|
||||
new_fragment.add_statements(map(self.on_statement, fragment.statements))
|
||||
else:
|
||||
new_fragment.add_statements(fragment.statements)
|
||||
|
||||
def map_drivers(self, fragment, new_fragment):
|
||||
for domain, signal in fragment.iter_drivers():
|
||||
new_fragment.add_driver(signal, domain)
|
||||
|
||||
def on_fragment(self, fragment):
|
||||
if isinstance(fragment, Instance):
|
||||
new_fragment = Instance(fragment.type)
|
||||
new_fragment.parameters = OrderedDict(fragment.parameters)
|
||||
self.map_named_ports(fragment, new_fragment)
|
||||
else:
|
||||
new_fragment = Fragment()
|
||||
new_fragment.flatten = fragment.flatten
|
||||
new_fragment.attrs = OrderedDict(fragment.attrs)
|
||||
self.map_ports(fragment, new_fragment)
|
||||
self.map_subfragments(fragment, new_fragment)
|
||||
self.map_domains(fragment, new_fragment)
|
||||
self.map_statements(fragment, new_fragment)
|
||||
self.map_drivers(fragment, new_fragment)
|
||||
return new_fragment
|
||||
|
||||
def __call__(self, value, *, src_loc_at=0):
|
||||
if isinstance(value, Fragment):
|
||||
return self.on_fragment(value)
|
||||
elif isinstance(value, TransformedElaboratable):
|
||||
value._transforms_.append(self)
|
||||
return value
|
||||
elif hasattr(value, "elaborate"):
|
||||
value = TransformedElaboratable(value, src_loc_at=1 + src_loc_at)
|
||||
value._transforms_.append(self)
|
||||
return value
|
||||
else:
|
||||
raise AttributeError("Object {!r} cannot be elaborated".format(value))
|
||||
|
||||
|
||||
class TransformedElaboratable(Elaboratable):
|
||||
def __init__(self, elaboratable, *, src_loc_at=0):
|
||||
assert hasattr(elaboratable, "elaborate")
|
||||
|
||||
# Fields prefixed and suffixed with underscore to avoid as many conflicts with the inner
|
||||
# object as possible, since we're forwarding attribute requests to it.
|
||||
self._elaboratable_ = elaboratable
|
||||
self._transforms_ = []
|
||||
|
||||
def __getattr__(self, attr):
|
||||
return getattr(self._elaboratable_, attr)
|
||||
|
||||
def elaborate(self, platform):
|
||||
fragment = Fragment.get(self._elaboratable_, platform)
|
||||
for transform in self._transforms_:
|
||||
fragment = transform(fragment)
|
||||
return fragment
|
||||
|
||||
|
||||
class DomainCollector(ValueVisitor, StatementVisitor):
|
||||
def __init__(self):
|
||||
self.used_domains = set()
|
||||
self.defined_domains = set()
|
||||
self._local_domains = set()
|
||||
|
||||
def _add_used_domain(self, domain_name):
|
||||
if domain_name is None:
|
||||
return
|
||||
if domain_name in self._local_domains:
|
||||
return
|
||||
self.used_domains.add(domain_name)
|
||||
|
||||
def on_ignore(self, value):
|
||||
pass
|
||||
|
||||
on_Const = on_ignore
|
||||
on_AnyConst = on_ignore
|
||||
on_AnySeq = on_ignore
|
||||
on_Signal = on_ignore
|
||||
|
||||
def on_ClockSignal(self, value):
|
||||
self._add_used_domain(value.domain)
|
||||
|
||||
def on_ResetSignal(self, value):
|
||||
self._add_used_domain(value.domain)
|
||||
|
||||
def on_Operator(self, value):
|
||||
for o in value.operands:
|
||||
self.on_value(o)
|
||||
|
||||
def on_Slice(self, value):
|
||||
self.on_value(value.value)
|
||||
|
||||
def on_Part(self, value):
|
||||
self.on_value(value.value)
|
||||
self.on_value(value.offset)
|
||||
|
||||
def on_Cat(self, value):
|
||||
for o in value.parts:
|
||||
self.on_value(o)
|
||||
|
||||
def on_Repl(self, value):
|
||||
self.on_value(value.value)
|
||||
|
||||
def on_ArrayProxy(self, value):
|
||||
for elem in value._iter_as_values():
|
||||
self.on_value(elem)
|
||||
self.on_value(value.index)
|
||||
|
||||
def on_Sample(self, value):
|
||||
self.on_value(value.value)
|
||||
|
||||
def on_Initial(self, value):
|
||||
pass
|
||||
|
||||
def on_Assign(self, stmt):
|
||||
self.on_value(stmt.lhs)
|
||||
self.on_value(stmt.rhs)
|
||||
|
||||
def on_property(self, stmt):
|
||||
self.on_value(stmt.test)
|
||||
|
||||
on_Assert = on_property
|
||||
on_Assume = on_property
|
||||
on_Cover = on_property
|
||||
|
||||
def on_Switch(self, stmt):
|
||||
self.on_value(stmt.test)
|
||||
for stmts in stmt.cases.values():
|
||||
self.on_statement(stmts)
|
||||
|
||||
def on_statements(self, stmts):
|
||||
for stmt in stmts:
|
||||
self.on_statement(stmt)
|
||||
|
||||
def on_fragment(self, fragment):
|
||||
if isinstance(fragment, Instance):
|
||||
for name, (value, dir) in fragment.named_ports.items():
|
||||
self.on_value(value)
|
||||
|
||||
old_local_domains, self._local_domains = self._local_domains, set(self._local_domains)
|
||||
for domain_name, domain in fragment.domains.items():
|
||||
if domain.local:
|
||||
self._local_domains.add(domain_name)
|
||||
else:
|
||||
self.defined_domains.add(domain_name)
|
||||
|
||||
self.on_statements(fragment.statements)
|
||||
for domain_name in fragment.drivers:
|
||||
self._add_used_domain(domain_name)
|
||||
for subfragment, name in fragment.subfragments:
|
||||
self.on_fragment(subfragment)
|
||||
|
||||
self._local_domains = old_local_domains
|
||||
|
||||
def __call__(self, fragment):
|
||||
self.on_fragment(fragment)
|
||||
|
||||
|
||||
class DomainRenamer(FragmentTransformer, ValueTransformer, StatementTransformer):
|
||||
def __init__(self, domain_map):
|
||||
if isinstance(domain_map, str):
|
||||
domain_map = {"sync": domain_map}
|
||||
for src, dst in domain_map.items():
|
||||
if src == "comb":
|
||||
raise ValueError("Domain '{}' may not be renamed".format(src))
|
||||
if dst == "comb":
|
||||
raise ValueError("Domain '{}' may not be renamed to '{}'".format(src, dst))
|
||||
self.domain_map = OrderedDict(domain_map)
|
||||
|
||||
def on_ClockSignal(self, value):
|
||||
if value.domain in self.domain_map:
|
||||
return ClockSignal(self.domain_map[value.domain])
|
||||
return value
|
||||
|
||||
def on_ResetSignal(self, value):
|
||||
if value.domain in self.domain_map:
|
||||
return ResetSignal(self.domain_map[value.domain],
|
||||
allow_reset_less=value.allow_reset_less)
|
||||
return value
|
||||
|
||||
def map_domains(self, fragment, new_fragment):
|
||||
for domain in fragment.iter_domains():
|
||||
cd = fragment.domains[domain]
|
||||
if domain in self.domain_map:
|
||||
if cd.name == domain:
|
||||
# Rename the actual ClockDomain object.
|
||||
cd.rename(self.domain_map[domain])
|
||||
else:
|
||||
assert cd.name == self.domain_map[domain]
|
||||
new_fragment.add_domains(cd)
|
||||
|
||||
def map_drivers(self, fragment, new_fragment):
|
||||
for domain, signals in fragment.drivers.items():
|
||||
if domain in self.domain_map:
|
||||
domain = self.domain_map[domain]
|
||||
for signal in signals:
|
||||
new_fragment.add_driver(self.on_value(signal), domain)
|
||||
|
||||
|
||||
class DomainLowerer(FragmentTransformer, ValueTransformer, StatementTransformer):
|
||||
def __init__(self, domains=None):
|
||||
self.domains = domains
|
||||
|
||||
def _resolve(self, domain, context):
|
||||
if domain not in self.domains:
|
||||
raise DomainError("Signal {!r} refers to nonexistent domain '{}'"
|
||||
.format(context, domain))
|
||||
return self.domains[domain]
|
||||
|
||||
def map_drivers(self, fragment, new_fragment):
|
||||
for domain, signal in fragment.iter_drivers():
|
||||
new_fragment.add_driver(self.on_value(signal), domain)
|
||||
|
||||
def replace_value_src_loc(self, value, new_value):
|
||||
return not isinstance(value, (ClockSignal, ResetSignal))
|
||||
|
||||
def on_ClockSignal(self, value):
|
||||
domain = self._resolve(value.domain, value)
|
||||
return domain.clk
|
||||
|
||||
def on_ResetSignal(self, value):
|
||||
domain = self._resolve(value.domain, value)
|
||||
if domain.rst is None:
|
||||
if value.allow_reset_less:
|
||||
return Const(0)
|
||||
else:
|
||||
raise DomainError("Signal {!r} refers to reset of reset-less domain '{}'"
|
||||
.format(value, value.domain))
|
||||
return domain.rst
|
||||
|
||||
def _insert_resets(self, fragment):
|
||||
for domain_name, signals in fragment.drivers.items():
|
||||
if domain_name is None:
|
||||
continue
|
||||
domain = fragment.domains[domain_name]
|
||||
if domain.rst is None:
|
||||
continue
|
||||
stmts = [signal.eq(Const(signal.reset, signal.width))
|
||||
for signal in signals if not signal.reset_less]
|
||||
fragment.add_statements(Switch(domain.rst, {1: stmts}))
|
||||
|
||||
def on_fragment(self, fragment):
|
||||
self.domains = fragment.domains
|
||||
new_fragment = super().on_fragment(fragment)
|
||||
self._insert_resets(new_fragment)
|
||||
return new_fragment
|
||||
|
||||
|
||||
class SampleDomainInjector(ValueTransformer, StatementTransformer):
|
||||
def __init__(self, domain):
|
||||
self.domain = domain
|
||||
|
||||
def on_Sample(self, value):
|
||||
if value.domain is not None:
|
||||
return value
|
||||
return Sample(value.value, value.clocks, self.domain)
|
||||
|
||||
def __call__(self, stmts):
|
||||
return self.on_statement(stmts)
|
||||
|
||||
|
||||
class SampleLowerer(FragmentTransformer, ValueTransformer, StatementTransformer):
|
||||
def __init__(self):
|
||||
self.initial = None
|
||||
self.sample_cache = None
|
||||
self.sample_stmts = None
|
||||
|
||||
def _name_reset(self, value):
|
||||
if isinstance(value, Const):
|
||||
return "c${}".format(value.value), value.value
|
||||
elif isinstance(value, Signal):
|
||||
return "s${}".format(value.name), value.reset
|
||||
elif isinstance(value, ClockSignal):
|
||||
return "clk", 0
|
||||
elif isinstance(value, ResetSignal):
|
||||
return "rst", 1
|
||||
elif isinstance(value, Initial):
|
||||
return "init", 0 # Past(Initial()) produces 0, 1, 0, 0, ...
|
||||
else:
|
||||
raise NotImplementedError # :nocov:
|
||||
|
||||
def on_Sample(self, value):
|
||||
if value in self.sample_cache:
|
||||
return self.sample_cache[value]
|
||||
|
||||
sampled_value = self.on_value(value.value)
|
||||
if value.clocks == 0:
|
||||
sample = sampled_value
|
||||
else:
|
||||
assert value.domain is not None
|
||||
sampled_name, sampled_reset = self._name_reset(value.value)
|
||||
name = "$sample${}${}${}".format(sampled_name, value.domain, value.clocks)
|
||||
sample = Signal.like(value.value, name=name, reset_less=True, reset=sampled_reset)
|
||||
sample.attrs["nmigen.sample_reg"] = True
|
||||
|
||||
prev_sample = self.on_Sample(Sample(sampled_value, value.clocks - 1, value.domain))
|
||||
if value.domain not in self.sample_stmts:
|
||||
self.sample_stmts[value.domain] = []
|
||||
self.sample_stmts[value.domain].append(sample.eq(prev_sample))
|
||||
|
||||
self.sample_cache[value] = sample
|
||||
return sample
|
||||
|
||||
def on_Initial(self, value):
|
||||
if self.initial is None:
|
||||
self.initial = Signal(name="init")
|
||||
return self.initial
|
||||
|
||||
def map_statements(self, fragment, new_fragment):
|
||||
self.initial = None
|
||||
self.sample_cache = ValueDict()
|
||||
self.sample_stmts = OrderedDict()
|
||||
new_fragment.add_statements(map(self.on_statement, fragment.statements))
|
||||
for domain, stmts in self.sample_stmts.items():
|
||||
new_fragment.add_statements(stmts)
|
||||
for stmt in stmts:
|
||||
new_fragment.add_driver(stmt.lhs, domain)
|
||||
if self.initial is not None:
|
||||
new_fragment.add_subfragment(Instance("$initstate", o_Y=self.initial))
|
||||
|
||||
|
||||
class SwitchCleaner(StatementVisitor):
|
||||
def on_ignore(self, stmt):
|
||||
return stmt
|
||||
|
||||
on_Assign = on_ignore
|
||||
on_Assert = on_ignore
|
||||
on_Assume = on_ignore
|
||||
on_Cover = on_ignore
|
||||
|
||||
def on_Switch(self, stmt):
|
||||
cases = OrderedDict((k, self.on_statement(s)) for k, s in stmt.cases.items())
|
||||
if any(len(s) for s in cases.values()):
|
||||
return Switch(stmt.test, cases)
|
||||
|
||||
def on_statements(self, stmts):
|
||||
stmts = flatten(self.on_statement(stmt) for stmt in stmts)
|
||||
return _StatementList(stmt for stmt in stmts if stmt is not None)
|
||||
|
||||
|
||||
class LHSGroupAnalyzer(StatementVisitor):
|
||||
def __init__(self):
|
||||
self.signals = SignalDict()
|
||||
self.unions = OrderedDict()
|
||||
|
||||
def find(self, signal):
|
||||
if signal not in self.signals:
|
||||
self.signals[signal] = len(self.signals)
|
||||
group = self.signals[signal]
|
||||
while group in self.unions:
|
||||
group = self.unions[group]
|
||||
self.signals[signal] = group
|
||||
return group
|
||||
|
||||
def unify(self, root, *leaves):
|
||||
root_group = self.find(root)
|
||||
for leaf in leaves:
|
||||
leaf_group = self.find(leaf)
|
||||
if root_group == leaf_group:
|
||||
continue
|
||||
self.unions[leaf_group] = root_group
|
||||
|
||||
def groups(self):
|
||||
groups = OrderedDict()
|
||||
for signal in self.signals:
|
||||
group = self.find(signal)
|
||||
if group not in groups:
|
||||
groups[group] = SignalSet()
|
||||
groups[group].add(signal)
|
||||
return groups
|
||||
|
||||
def on_Assign(self, stmt):
|
||||
lhs_signals = stmt._lhs_signals()
|
||||
if lhs_signals:
|
||||
self.unify(*stmt._lhs_signals())
|
||||
|
||||
def on_property(self, stmt):
|
||||
lhs_signals = stmt._lhs_signals()
|
||||
if lhs_signals:
|
||||
self.unify(*stmt._lhs_signals())
|
||||
|
||||
on_Assert = on_property
|
||||
on_Assume = on_property
|
||||
on_Cover = on_property
|
||||
|
||||
def on_Switch(self, stmt):
|
||||
for case_stmts in stmt.cases.values():
|
||||
self.on_statements(case_stmts)
|
||||
|
||||
def on_statements(self, stmts):
|
||||
for stmt in stmts:
|
||||
self.on_statement(stmt)
|
||||
|
||||
def __call__(self, stmts):
|
||||
self.on_statements(stmts)
|
||||
return self.groups()
|
||||
|
||||
|
||||
class LHSGroupFilter(SwitchCleaner):
|
||||
def __init__(self, signals):
|
||||
self.signals = signals
|
||||
|
||||
def on_Assign(self, stmt):
|
||||
# The invariant provided by LHSGroupAnalyzer is that all signals that ever appear together
|
||||
# on LHS are a part of the same group, so it is sufficient to check any of them.
|
||||
lhs_signals = stmt.lhs._lhs_signals()
|
||||
if lhs_signals:
|
||||
any_lhs_signal = next(iter(lhs_signals))
|
||||
if any_lhs_signal in self.signals:
|
||||
return stmt
|
||||
|
||||
def on_property(self, stmt):
|
||||
any_lhs_signal = next(iter(stmt._lhs_signals()))
|
||||
if any_lhs_signal in self.signals:
|
||||
return stmt
|
||||
|
||||
on_Assert = on_property
|
||||
on_Assume = on_property
|
||||
on_Cover = on_property
|
||||
|
||||
|
||||
class _ControlInserter(FragmentTransformer):
|
||||
def __init__(self, controls):
|
||||
self.src_loc = None
|
||||
if isinstance(controls, Value):
|
||||
controls = {"sync": controls}
|
||||
self.controls = OrderedDict(controls)
|
||||
|
||||
def on_fragment(self, fragment):
|
||||
new_fragment = super().on_fragment(fragment)
|
||||
for domain, signals in fragment.drivers.items():
|
||||
if domain is None or domain not in self.controls:
|
||||
continue
|
||||
self._insert_control(new_fragment, domain, signals)
|
||||
return new_fragment
|
||||
|
||||
def _insert_control(self, fragment, domain, signals):
|
||||
raise NotImplementedError # :nocov:
|
||||
|
||||
def __call__(self, value, *, src_loc_at=0):
|
||||
self.src_loc = tracer.get_src_loc(src_loc_at=src_loc_at)
|
||||
return super().__call__(value, src_loc_at=1 + src_loc_at)
|
||||
|
||||
|
||||
class ResetInserter(_ControlInserter):
|
||||
def _insert_control(self, fragment, domain, signals):
|
||||
stmts = [s.eq(Const(s.reset, s.width)) for s in signals if not s.reset_less]
|
||||
fragment.add_statements(Switch(self.controls[domain], {1: stmts}, src_loc=self.src_loc))
|
||||
|
||||
|
||||
class EnableInserter(_ControlInserter):
|
||||
def _insert_control(self, fragment, domain, signals):
|
||||
stmts = [s.eq(s) for s in signals]
|
||||
fragment.add_statements(Switch(self.controls[domain], {0: stmts}, src_loc=self.src_loc))
|
||||
|
||||
def on_fragment(self, fragment):
|
||||
new_fragment = super().on_fragment(fragment)
|
||||
if isinstance(new_fragment, Instance) and new_fragment.type in ("$memrd", "$memwr"):
|
||||
clk_port, clk_dir = new_fragment.named_ports["CLK"]
|
||||
if isinstance(clk_port, ClockSignal) and clk_port.domain in self.controls:
|
||||
en_port, en_dir = new_fragment.named_ports["EN"]
|
||||
en_port = Mux(self.controls[clk_port.domain], en_port, Const(0, len(en_port)))
|
||||
new_fragment.named_ports["EN"] = en_port, en_dir
|
||||
return new_fragment
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.hdl.xfrm, use amaranth.hdl.xfrm",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,6 @@
|
|||
from amaranth.lib import *
|
||||
|
||||
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.lib, use amaranth.lib",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
@ -1,267 +1,7 @@
|
|||
from .. import *
|
||||
from amaranth.lib.cdc import *
|
||||
from amaranth.lib.cdc import __all__
|
||||
|
||||
|
||||
__all__ = ["FFSynchronizer", "AsyncFFSynchronizer", "ResetSynchronizer", "PulseSynchronizer"]
|
||||
|
||||
|
||||
def _check_stages(stages):
|
||||
if not isinstance(stages, int) or stages < 1:
|
||||
raise TypeError("Synchronization stage count must be a positive integer, not {!r}"
|
||||
.format(stages))
|
||||
if stages < 2:
|
||||
raise ValueError("Synchronization stage count may not safely be less than 2")
|
||||
|
||||
|
||||
class FFSynchronizer(Elaboratable):
|
||||
"""Resynchronise a signal to a different clock domain.
|
||||
|
||||
Consists of a chain of flip-flops. Eliminates metastabilities at the output, but provides
|
||||
no other guarantee as to the safe domain-crossing of a signal.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
i : Signal(n), in
|
||||
Signal to be resynchronised.
|
||||
o : Signal(n), out
|
||||
Signal connected to synchroniser output.
|
||||
o_domain : str
|
||||
Name of output clock domain.
|
||||
reset : int
|
||||
Reset value of the flip-flops. On FPGAs, even if ``reset_less`` is True,
|
||||
the :class:`FFSynchronizer` is still set to this value during initialization.
|
||||
reset_less : bool
|
||||
If ``True`` (the default), this :class:`FFSynchronizer` is unaffected by ``o_domain``
|
||||
reset. See "Note on Reset" below.
|
||||
stages : int
|
||||
Number of synchronization stages between input and output. The lowest safe number is 2,
|
||||
with higher numbers reducing MTBF further, at the cost of increased latency.
|
||||
max_input_delay : None or float
|
||||
Maximum delay from the input signal's clock to the first synchronization stage, in seconds.
|
||||
If specified and the platform does not support it, elaboration will fail.
|
||||
|
||||
Platform override
|
||||
-----------------
|
||||
Define the ``get_ff_sync`` platform method to override the implementation of
|
||||
:class:`FFSynchronizer`, e.g. to instantiate library cells directly.
|
||||
|
||||
Note on Reset
|
||||
-------------
|
||||
:class:`FFSynchronizer` is non-resettable by default. Usually this is the safest option;
|
||||
on FPGAs the :class:`FFSynchronizer` will still be initialized to its ``reset`` value when
|
||||
the FPGA loads its configuration.
|
||||
|
||||
However, in designs where the value of the :class:`FFSynchronizer` must be valid immediately
|
||||
after reset, consider setting ``reset_less`` to False if any of the following is true:
|
||||
|
||||
- You are targeting an ASIC, or an FPGA that does not allow arbitrary initial flip-flop states;
|
||||
- Your design features warm (non-power-on) resets of ``o_domain``, so the one-time
|
||||
initialization at power on is insufficient;
|
||||
- Your design features a sequenced reset, and the :class:`FFSynchronizer` must maintain
|
||||
its reset value until ``o_domain`` reset specifically is deasserted.
|
||||
|
||||
:class:`FFSynchronizer` is reset by the ``o_domain`` reset only.
|
||||
"""
|
||||
def __init__(self, i, o, *, o_domain="sync", reset=0, reset_less=True, stages=2,
             max_input_delay=None):
    # Reject unsafe synchronizer depths (fewer than two stages) up front.
    _check_stages(stages)

    # Data ports: asynchronous input, synchronized output.
    self.i = i
    self.o = o

    # Configuration is stored privately; see the class docstring for meaning.
    self._o_domain = o_domain
    self._reset = reset
    self._reset_less = reset_less
    self._stages = stages
    self._max_input_delay = max_input_delay
|
||||
|
||||
def elaborate(self, platform):
    # The platform may provide a specialized implementation (e.g. vendor
    # primitives carrying timing constraints); defer to it when available.
    if hasattr(platform, "get_ff_sync"):
        return platform.get_ff_sync(self)

    # The generic implementation below cannot honor an input delay constraint.
    if self._max_input_delay is not None:
        raise NotImplementedError("Platform '{}' does not support constraining input delay "
                                  "for FFSynchronizer"
                                  .format(type(platform).__name__))

    m = Module()
    # Build the synchronization chain stage0 .. stage{n-1}; every flop is
    # clocked by the output domain and feeds the next stage.
    chain = []
    source = self.i
    for index in range(self._stages):
        stage = Signal(self.i.shape(), name="stage{}".format(index),
                       reset=self._reset, reset_less=self._reset_less)
        m.d[self._o_domain] += stage.eq(source)
        chain.append(stage)
        source = stage
    # The last stage drives the synchronized output.
    m.d.comb += self.o.eq(chain[-1])
    return m
|
||||
|
||||
|
||||
class AsyncFFSynchronizer(Elaboratable):
    """Synchronize deassertion of an asynchronous signal.

    The signal driven by the :class:`AsyncFFSynchronizer` is asserted asynchronously and deasserted
    synchronously, eliminating metastability during deassertion.

    This synchronizer is primarily useful for resets and reset-like signals.

    Parameters
    ----------
    i : Signal(1), in
        Asynchronous input signal, to be synchronized.
    o : Signal(1), out
        Synchronously released output signal.
    o_domain : str
        Name of clock domain to synchronize to.
    stages : int, >=2
        Number of synchronization stages between input and output. The lowest safe number is 2,
        with higher numbers reducing MTBF further, at the cost of increased deassertion latency.
    async_edge : str
        The edge of the input signal which causes the output to be set. Must be one of "pos" or "neg".
    max_input_delay : None or float
        Maximum delay from the input signal's clock to the first synchronization stage, in seconds.
        If specified and the platform does not support it, elaboration will fail.

    Platform override
    -----------------
    Define the ``get_async_ff_sync`` platform method to override the implementation of
    :class:`AsyncFFSynchronizer`, e.g. to instantiate library cells directly.
    """
    def __init__(self, i, o, *, o_domain="sync", stages=2, async_edge="pos", max_input_delay=None):
        # Reject unsafe synchronizer depths (fewer than two stages).
        _check_stages(stages)

        # This synchronizer only handles single-bit (reset-like) signals.
        if len(i) != 1:
            raise ValueError("AsyncFFSynchronizer input width must be 1, not {}"
                             .format(len(i)))
        if len(o) != 1:
            raise ValueError("AsyncFFSynchronizer output width must be 1, not {}"
                             .format(len(o)))

        if async_edge not in ("pos", "neg"):
            raise ValueError("AsyncFFSynchronizer async edge must be one of 'pos' or 'neg', "
                             "not {!r}"
                             .format(async_edge))

        self.i = i
        self.o = o

        self._o_domain = o_domain
        self._stages = stages

        self._edge = async_edge

        self._max_input_delay = max_input_delay

    def elaborate(self, platform):
        # Allow the platform to substitute a specialized (e.g. library-cell) implementation.
        if hasattr(platform, "get_async_ff_sync"):
            return platform.get_async_ff_sync(self)

        # The generic implementation below cannot honor an input delay constraint.
        if self._max_input_delay is not None:
            raise NotImplementedError("Platform '{}' does not support constraining input delay "
                                      "for AsyncFFSynchronizer"
                                      .format(type(platform).__name__))

        m = Module()
        # Local clock domain: clocked by o_domain (see comb assignment below) but reset
        # asynchronously by the input signal itself. While the input edge is active, the
        # flop chain is asynchronously held at its reset value of 1 (output asserted).
        m.domains += ClockDomain("async_ff", async_reset=True, local=True)
        flops = [Signal(1, name="stage{}".format(index), reset=1)
                 for index in range(self._stages)]
        # A constant 0 is shifted through the chain, so the output deasserts a fixed
        # number of o_domain cycles after the asynchronous reset is released.
        for i, o in zip((0, *flops), flops):
            m.d.async_ff += o.eq(i)

        # Select which polarity of the input asserts the local domain's reset.
        if self._edge == "pos":
            m.d.comb += ResetSignal("async_ff").eq(self.i)
        else:
            m.d.comb += ResetSignal("async_ff").eq(~self.i)

        m.d.comb += [
            ClockSignal("async_ff").eq(ClockSignal(self._o_domain)),
            self.o.eq(flops[-1])
        ]

        return m
|
||||
|
||||
|
||||
class ResetSynchronizer(Elaboratable):
    """Synchronize deassertion of a clock domain reset.

    The reset of the clock domain driven by the :class:`ResetSynchronizer` is asserted
    asynchronously and deasserted synchronously, eliminating metastability during deassertion.

    The driven clock domain could use a reset that is asserted either synchronously or
    asynchronously; a reset is always deasserted synchronously. A domain with an asynchronously
    asserted reset is useful if the clock of the domain may be gated, yet the domain still
    needs to be reset promptly; otherwise, synchronously asserted reset (the default) should
    be used.

    Parameters
    ----------
    arst : Signal(1), in
        Asynchronous reset signal, to be synchronized.
    domain : str
        Name of clock domain to reset.
    stages : int, >=2
        Number of synchronization stages between input and output. The lowest safe number is 2,
        with higher numbers reducing MTBF further, at the cost of increased deassertion latency.
    max_input_delay : None or float
        Maximum delay from the input signal's clock to the first synchronization stage, in seconds.
        If specified and the platform does not support it, elaboration will fail.

    Platform override
    -----------------
    Define the ``get_reset_sync`` platform method to override the implementation of
    :class:`ResetSynchronizer`, e.g. to instantiate library cells directly.
    """
    def __init__(self, arst, *, domain="sync", stages=2, max_input_delay=None):
        # Reject unsafe synchronizer depths (fewer than two stages).
        _check_stages(stages)

        self.arst = arst

        self._domain = domain
        self._stages = stages

        self._max_input_delay = max_input_delay

    def elaborate(self, platform):
        # This class is a thin wrapper: it synchronizes ``arst`` onto the target
        # domain's ResetSignal using an async-assert / sync-release synchronizer.
        # NOTE(review): the docstring above documents a ``get_reset_sync`` platform
        # override, but elaboration delegates straight to AsyncFFSynchronizer, whose
        # own hook is ``get_async_ff_sync``; ``get_reset_sync`` is never consulted
        # here -- confirm which override point is intended.
        return AsyncFFSynchronizer(self.arst, ResetSignal(self._domain), o_domain=self._domain,
                                   stages=self._stages, max_input_delay=self._max_input_delay)
|
||||
|
||||
|
||||
class PulseSynchronizer(Elaboratable):
    """A one-clock pulse on the input produces a one-clock pulse on the output.

    If the output clock is faster than the input clock, the input may safely be asserted at
    100% duty cycle. Otherwise, for a clock ratio of `n`:1, assert the input at most once per
    `n` input clocks, or pulses may be dropped. Beyond that, the input and output clock
    frequencies may be in any ratio.

    Parameters
    ----------
    i_domain : str
        Name of input clock domain.
    o_domain : str
        Name of output clock domain.
    stages : int, >=2
        Number of synchronization stages between input and output. The lowest safe number is 2,
        with higher numbers reducing MTBF further, at the cost of increased deassertion latency.
    """
    def __init__(self, i_domain, o_domain, *, stages=2):
        # Reject unsafe synchronizer depths (fewer than two stages).
        _check_stages(stages)

        # Single-bit pulse ports.
        self.i = Signal()
        self.o = Signal()

        self._i_domain = i_domain
        self._o_domain = o_domain
        self._stages = stages

    def elaborate(self, platform):
        m = Module()

        # Toggle-based CDC: each input pulse flips ``toggle_i``; the toggle level is
        # carried across the clock boundary, and comparing two consecutive output-domain
        # samples turns each level change back into a single-cycle pulse.
        toggle_i = Signal()
        toggle_o = Signal()
        toggle_o_prev = Signal()
        m.submodules.ff_sync = FFSynchronizer(
            toggle_i, toggle_o, o_domain=self._o_domain, stages=self._stages)

        m.d[self._i_domain] += toggle_i.eq(toggle_i ^ self.i)
        m.d[self._o_domain] += toggle_o_prev.eq(toggle_o)
        m.d.comb += self.o.eq(toggle_o ^ toggle_o_prev)

        return m
|
||||
# Backwards-compatibility shim: importing this module under its old nmigen name
# still works, but emits a DeprecationWarning pointing at the amaranth location.
import warnings
warnings.warn("instead of nmigen.lib.cdc, use amaranth.lib.cdc",
              DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,186 +1,7 @@
|
|||
"""Encoders and decoders between binary and one-hot representation."""
|
||||
|
||||
from .. import *
|
||||
from amaranth.lib.coding import *
|
||||
from amaranth.lib.coding import __all__
|
||||
|
||||
|
||||
__all__ = [
|
||||
"Encoder", "Decoder",
|
||||
"PriorityEncoder", "PriorityDecoder",
|
||||
"GrayEncoder", "GrayDecoder",
|
||||
]
|
||||
|
||||
|
||||
class Encoder(Elaboratable):
    """Encode one-hot to binary.

    If exactly one bit of ``i`` is asserted, ``n`` is low and ``o`` holds the index of
    that bit. In every other case (no bits set, or several bits set), ``n`` is high and
    ``o`` is ``0``.

    Parameters
    ----------
    width : int
        Bit width of the input

    Attributes
    ----------
    i : Signal(width), in
        One-hot input.
    o : Signal(range(width)), out
        Encoded binary.
    n : Signal, out
        Invalid: either none or multiple input bits are asserted.
    """
    def __init__(self, width):
        self.width = width

        self.i = Signal(width)
        self.o = Signal(range(width))
        self.n = Signal()

    def elaborate(self, platform):
        m = Module()
        with m.Switch(self.i):
            # One case per legal one-hot pattern; the matching bit index is the output.
            for bit in range(self.width):
                with m.Case(1 << bit):
                    m.d.comb += self.o.eq(bit)
            # Default case: the input is not one-hot, so flag it as invalid.
            with m.Case():
                m.d.comb += self.n.eq(1)
        return m
|
||||
|
||||
|
||||
class PriorityEncoder(Elaboratable):
    """Priority encode requests to binary.

    If any bit of ``i`` is asserted, ``n`` is low and ``o`` holds the index of the least
    significant asserted bit. Otherwise, ``n`` is high and ``o`` is ``0``.

    Parameters
    ----------
    width : int
        Bit width of the input.

    Attributes
    ----------
    i : Signal(width), in
        Input requests.
    o : Signal(range(width)), out
        Encoded binary.
    n : Signal, out
        Invalid: no input bits are asserted.
    """
    def __init__(self, width):
        self.width = width

        self.i = Signal(width)
        self.o = Signal(range(width))
        self.n = Signal()

    def elaborate(self, platform):
        m = Module()
        # Later conditional assignments take precedence over earlier ones, so walking
        # from the most significant bit down lets the least significant asserted bit win.
        for index in reversed(range(self.width)):
            with m.If(self.i[index]):
                m.d.comb += self.o.eq(index)
        # No request at all is flagged as invalid.
        m.d.comb += self.n.eq(self.i == 0)
        return m
|
||||
|
||||
|
||||
class Decoder(Elaboratable):
    """Decode binary to one-hot.

    When ``n`` is low, exactly the ``i``-th bit of ``o`` is asserted.
    When ``n`` is high, ``o`` is forced to ``0``.

    Parameters
    ----------
    width : int
        Bit width of the output.

    Attributes
    ----------
    i : Signal(range(width)), in
        Input binary.
    o : Signal(width), out
        Decoded one-hot.
    n : Signal, in
        Invalid, no output bits are to be asserted.
    """
    def __init__(self, width):
        self.width = width

        self.i = Signal(range(width))
        self.n = Signal()
        self.o = Signal(width)

    def elaborate(self, platform):
        m = Module()
        with m.Switch(self.i):
            # Each possible input value selects exactly one output bit.
            for bit in range(len(self.o)):
                with m.Case(bit):
                    m.d.comb += self.o.eq(1 << bit)
        # The invalid strobe overrides whatever the switch decoded.
        with m.If(self.n):
            m.d.comb += self.o.eq(0)
        return m
|
||||
|
||||
|
||||
class PriorityDecoder(Decoder):
    """Decode binary to priority request.

    Identical to :class:`Decoder`.
    """
    # No body: this class adds nothing over Decoder; presumably it exists as
    # a named counterpart to PriorityEncoder -- confirm against module docs.
|
||||
|
||||
|
||||
class GrayEncoder(Elaboratable):
    """Encode binary to Gray code.

    Parameters
    ----------
    width : int
        Bit width.

    Attributes
    ----------
    i : Signal(width), in
        Input natural binary.
    o : Signal(width), out
        Encoded Gray code.
    """
    def __init__(self, width):
        self.width = width

        self.i = Signal(width)
        self.o = Signal(width)

    def elaborate(self, platform):
        m = Module()
        # Classic binary-to-Gray conversion: XOR each bit with its more significant
        # neighbour; the top bit has no neighbour and passes through unchanged.
        shifted = self.i[1:]
        m.d.comb += self.o.eq(self.i ^ shifted)
        return m
|
||||
|
||||
|
||||
class GrayDecoder(Elaboratable):
    """Decode Gray code to binary.

    Parameters
    ----------
    width : int
        Bit width.

    Attributes
    ----------
    i : Signal(width), in
        Input Gray code.
    o : Signal(width), out
        Decoded natural binary.
    """
    def __init__(self, width):
        self.width = width

        self.i = Signal(width)
        self.o = Signal(width)

    def elaborate(self, platform):
        m = Module()
        # Gray-to-binary: the top bit is copied verbatim, and every lower bit is
        # the XOR of the already-decoded bit above it with its own Gray bit.
        m.d.comb += self.o[-1].eq(self.i[-1])
        for bit in reversed(range(self.width - 1)):
            m.d.comb += self.o[bit].eq(self.o[bit + 1] ^ self.i[bit])
        return m
|
||||
# Backwards-compatibility shim: importing this module under its old nmigen name
# still works, but emits a DeprecationWarning pointing at the amaranth location.
import warnings
warnings.warn("instead of nmigen.lib.coding, use amaranth.lib.coding",
              DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,529 +1,7 @@
|
|||
"""First-in first-out queues."""
|
||||
from amaranth.lib.fifo import *
|
||||
from amaranth.lib.fifo import __all__
|
||||
|
||||
from .. import *
|
||||
from ..asserts import *
|
||||
from .._utils import log2_int
|
||||
from .coding import GrayEncoder, GrayDecoder
|
||||
from .cdc import FFSynchronizer, AsyncFFSynchronizer
|
||||
|
||||
|
||||
__all__ = ["FIFOInterface", "SyncFIFO", "SyncFIFOBuffered", "AsyncFIFO", "AsyncFIFOBuffered"]
|
||||
|
||||
|
||||
class FIFOInterface:
    # Shared docstring template; concrete queues fill in the placeholders via
    # ``FIFOInterface._doc_template.format(...)`` so that all FIFO variants document
    # the common interface consistently.
    _doc_template = """
    {description}

    Parameters
    ----------
    width : int
        Bit width of data entries.
    depth : int
        Depth of the queue. If zero, the FIFO cannot be read from or written to.
    {parameters}

    Attributes
    ----------
    {attributes}
    w_data : in, width
        Input data.
    w_rdy : out
        Asserted if there is space in the queue, i.e. ``w_en`` can be asserted to write
        a new entry.
    w_en : in
        Write strobe. Latches ``w_data`` into the queue. Does nothing if ``w_rdy`` is not asserted.
    w_level : out
        Number of unread entries.
    {w_attributes}
    r_data : out, width
        Output data. {r_data_valid}
    r_rdy : out
        Asserted if there is an entry in the queue, i.e. ``r_en`` can be asserted to read
        an existing entry.
    r_en : in
        Read strobe. Makes the next entry (if any) available on ``r_data`` at the next cycle.
        Does nothing if ``r_rdy`` is not asserted.
    r_level : out
        Number of unread entries.
    {r_attributes}
    """

    # Bug fix: ``r_en`` below previously read ``r_en` (missing closing backtick),
    # producing malformed reST inline markup in every generated FIFO docstring.
    __doc__ = _doc_template.format(description="""
    Data written to the input interface (``w_data``, ``w_rdy``, ``w_en``) is buffered and can be
    read at the output interface (``r_data``, ``r_rdy``, ``r_en``). The data entry written first
    to the input also appears first on the output.
    """,
    parameters="",
    r_data_valid="The conditions in which ``r_data`` is valid depends on the type of the queue.",
    attributes="""
    fwft : bool
        First-word fallthrough. If set, when ``r_rdy`` rises, the first entry is already
        available, i.e. ``r_data`` is valid. Otherwise, after ``r_rdy`` rises, it is necessary
        to strobe ``r_en`` for ``r_data`` to become valid.
    """.strip(),
    w_attributes="",
    r_attributes="")

    def __init__(self, *, width, depth, fwft):
        # Validate the geometry before creating any signals.
        if not isinstance(width, int) or width < 0:
            raise TypeError("FIFO width must be a non-negative integer, not {!r}"
                            .format(width))
        if not isinstance(depth, int) or depth < 0:
            raise TypeError("FIFO depth must be a non-negative integer, not {!r}"
                            .format(depth))
        self.width = width
        self.depth = depth
        self.fwft = fwft

        # Write half of the interface.
        self.w_data = Signal(width, reset_less=True)
        self.w_rdy = Signal()  # writable; not full
        self.w_en = Signal()
        self.w_level = Signal(range(depth + 1))

        # Read half of the interface.
        self.r_data = Signal(width, reset_less=True)
        self.r_rdy = Signal()  # readable; not empty
        self.r_en = Signal()
        self.r_level = Signal(range(depth + 1))
||||
|
||||
|
||||
def _incr(signal, modulo):
    """Return *signal* incremented by one, wrapping at *modulo*.

    When the modulus equals the signal's natural range, ordinary addition
    already wraps; otherwise an explicit wrap-around mux is emitted.
    """
    natural_range = 2 ** len(signal)
    if modulo == natural_range:
        return signal + 1
    return Mux(signal == modulo - 1, 0, signal + 1)
|
||||
|
||||
|
||||
class SyncFIFO(Elaboratable, FIFOInterface):
    __doc__ = FIFOInterface._doc_template.format(
    description="""
    Synchronous first in, first out queue.

    Read and write interfaces are accessed from the same clock domain. If different clock domains
    are needed, use :class:`AsyncFIFO`.
    """.strip(),
    parameters="""
    fwft : bool
        First-word fallthrough. If set, when the queue is empty and an entry is written into it,
        that entry becomes available on the output on the same clock cycle. Otherwise, it is
        necessary to assert ``r_en`` for ``r_data`` to become valid.
    """.strip(),
    r_data_valid="""
    For FWFT queues, valid if ``r_rdy`` is asserted. For non-FWFT queues, valid on the next
    cycle after ``r_rdy`` and ``r_en`` have been asserted.
    """.strip(),
    attributes="",
    r_attributes="",
    w_attributes="")

    def __init__(self, *, width, depth, fwft=True):
        super().__init__(width=width, depth=depth, fwft=fwft)

        # Current number of unread entries; drives both w_level and r_level.
        self.level = Signal(range(depth + 1))

    def elaborate(self, platform):
        m = Module()
        # A zero-depth FIFO can never be read or written; tie both strobes off.
        if self.depth == 0:
            m.d.comb += [
                self.w_rdy.eq(0),
                self.r_rdy.eq(0),
            ]
            return m

        m.d.comb += [
            self.w_rdy.eq(self.level != self.depth),
            self.r_rdy.eq(self.level != 0),
            self.w_level.eq(self.level),
            self.r_level.eq(self.level),
        ]

        # A transfer happens only when the strobe is qualified by readiness.
        do_read = self.r_rdy & self.r_en
        do_write = self.w_rdy & self.w_en

        # Backing storage: one write port, one read port. For FWFT the read port is
        # combinational (and transparent), so a write to an empty queue shows up on
        # r_data in the same cycle; otherwise it is a registered read.
        storage = Memory(width=self.width, depth=self.depth)
        w_port = m.submodules.w_port = storage.write_port()
        r_port = m.submodules.r_port = storage.read_port(
            domain="comb" if self.fwft else "sync", transparent=self.fwft)
        # Wrapping write/read pointers into the storage array.
        produce = Signal(range(self.depth))
        consume = Signal(range(self.depth))

        m.d.comb += [
            w_port.addr.eq(produce),
            w_port.data.eq(self.w_data),
            w_port.en.eq(self.w_en & self.w_rdy),
        ]
        with m.If(do_write):
            m.d.sync += produce.eq(_incr(produce, self.depth))

        m.d.comb += [
            r_port.addr.eq(consume),
            self.r_data.eq(r_port.data),
        ]
        if not self.fwft:
            # Registered read: only advance the read port when the consumer strobes.
            m.d.comb += r_port.en.eq(self.r_en)
        with m.If(do_read):
            m.d.sync += consume.eq(_incr(consume, self.depth))

        # Simultaneous read+write leaves the level unchanged.
        with m.If(do_write & ~do_read):
            m.d.sync += self.level.eq(self.level + 1)
        with m.If(do_read & ~do_write):
            m.d.sync += self.level.eq(self.level - 1)

        # Formal invariants tying the pointers to the level counter.
        if platform == "formal":
            # TODO: move this logic to SymbiYosys
            with m.If(Initial()):
                m.d.comb += [
                    Assume(produce < self.depth),
                    Assume(consume < self.depth),
                ]
                with m.If(produce == consume):
                    m.d.comb += Assume((self.level == 0) | (self.level == self.depth))
                with m.If(produce > consume):
                    m.d.comb += Assume(self.level == (produce - consume))
                with m.If(produce < consume):
                    m.d.comb += Assume(self.level == (self.depth + produce - consume))
            with m.Else():
                m.d.comb += [
                    Assert(produce < self.depth),
                    Assert(consume < self.depth),
                ]
                with m.If(produce == consume):
                    m.d.comb += Assert((self.level == 0) | (self.level == self.depth))
                with m.If(produce > consume):
                    m.d.comb += Assert(self.level == (produce - consume))
                with m.If(produce < consume):
                    m.d.comb += Assert(self.level == (self.depth + produce - consume))

        return m
|
||||
|
||||
|
||||
class SyncFIFOBuffered(Elaboratable, FIFOInterface):
    __doc__ = FIFOInterface._doc_template.format(
    description="""
    Buffered synchronous first in, first out queue.

    This queue's interface is identical to :class:`SyncFIFO` configured as ``fwft=True``, but it
    does not use asynchronous memory reads, which are incompatible with FPGA block RAMs.

    In exchange, the latency between an entry being written to an empty queue and that entry
    becoming available on the output is increased by one cycle compared to :class:`SyncFIFO`.
    """.strip(),
    parameters="""
    fwft : bool
        Always set.
    """.strip(),
    attributes="",
    r_data_valid="Valid if ``r_rdy`` is asserted.",
    r_attributes="""
    level : out
        Number of unread entries.
    """.strip(),
    w_attributes="")

    def __init__(self, *, width, depth):
        super().__init__(width=width, depth=depth, fwft=True)

        # Total number of unread entries, including the output register.
        self.level = Signal(range(depth + 1))

    def elaborate(self, platform):
        m = Module()
        # A zero-depth queue can never transfer data; tie both strobes off.
        if self.depth == 0:
            m.d.comb += [
                self.w_rdy.eq(0),
                self.r_rdy.eq(0),
            ]
            return m

        # Effectively, this queue treats the output register of the non-FWFT inner
        # queue as an additional storage element, so the inner queue is one shallower.
        inner = SyncFIFO(width=self.width, depth=self.depth - 1, fwft=False)
        m.submodules.unbuffered = inner

        # Write side passes straight through to the inner queue.
        m.d.comb += inner.w_data.eq(self.w_data)
        m.d.comb += inner.w_en.eq(self.w_en)
        m.d.comb += self.w_rdy.eq(inner.w_rdy)

        # Read side: refill the output register whenever it is empty or being read.
        m.d.comb += self.r_data.eq(inner.r_data)
        m.d.comb += inner.r_en.eq(inner.r_rdy & (~self.r_rdy | self.r_en))
        with m.If(inner.r_en):
            m.d.sync += self.r_rdy.eq(1)
        with m.Elif(self.r_en):
            m.d.sync += self.r_rdy.eq(0)

        # The output register, when valid, counts as one extra buffered entry.
        m.d.comb += self.level.eq(inner.level + self.r_rdy)
        m.d.comb += self.w_level.eq(self.level)
        m.d.comb += self.r_level.eq(self.level)

        return m
|
||||
|
||||
|
||||
class AsyncFIFO(Elaboratable, FIFOInterface):
    __doc__ = FIFOInterface._doc_template.format(
    description="""
    Asynchronous first in, first out queue.

    Read and write interfaces are accessed from different clock domains, which can be set when
    constructing the FIFO.

    :class:`AsyncFIFO` can be reset from the write clock domain. When the write domain reset is
    asserted, the FIFO becomes empty. When the read domain is reset, data remains in the FIFO - the
    read domain logic should correctly handle this case.

    :class:`AsyncFIFO` only supports power of 2 depths. Unless ``exact_depth`` is specified,
    the ``depth`` parameter is rounded up to the next power of 2.
    """.strip(),
    parameters="""
    r_domain : str
        Read clock domain.
    w_domain : str
        Write clock domain.
    """.strip(),
    attributes="""
    fwft : bool
        Always set.
    """.strip(),
    r_data_valid="Valid if ``r_rdy`` is asserted.",
    r_attributes="""
    r_rst : Signal, out
        Asserted while the FIFO is being reset by the write-domain reset (for at least one
        read-domain clock cycle).
    """.strip(),
    w_attributes="")

    def __init__(self, *, width, depth, r_domain="read", w_domain="write", exact_depth=False):
        if depth != 0:
            try:
                # Round the depth up to a power of 2 (or require one exactly).
                depth_bits = log2_int(depth, need_pow2=exact_depth)
                depth = 1 << depth_bits
            except ValueError:
                raise ValueError("AsyncFIFO only supports depths that are powers of 2; requested "
                                 "exact depth {} is not"
                                 .format(depth)) from None
        else:
            depth_bits = 0
        super().__init__(width=width, depth=depth, fwft=True)

        self.r_rst = Signal()
        self._r_domain = r_domain
        self._w_domain = w_domain
        # Pointers carry one extra bit to distinguish full from empty.
        self._ctr_bits = depth_bits + 1

    def elaborate(self, platform):
        m = Module()
        # A zero-depth FIFO can never be read or written; tie both strobes off.
        if self.depth == 0:
            m.d.comb += [
                self.w_rdy.eq(0),
                self.r_rdy.eq(0),
            ]
            return m

        # The design of this queue is the "style #2" from Clifford E. Cummings' paper "Simulation
        # and Synthesis Techniques for Asynchronous FIFO Design":
        # http://www.sunburst-design.com/papers/CummingsSNUG2002SJ_FIFO1.pdf

        do_write = self.w_rdy & self.w_en
        do_read = self.r_rdy & self.r_en

        # TODO: extract this pattern into lib.cdc.GrayCounter
        # Binary produce counter, kept in the write domain.
        produce_w_bin = Signal(self._ctr_bits)
        produce_w_nxt = Signal(self._ctr_bits)
        m.d.comb += produce_w_nxt.eq(produce_w_bin + do_write)
        m.d[self._w_domain] += produce_w_bin.eq(produce_w_nxt)

        # Note: Both read-domain counters must be reset_less (see comments below)
        consume_r_bin = Signal(self._ctr_bits, reset_less=True)
        consume_r_nxt = Signal(self._ctr_bits)
        m.d.comb += consume_r_nxt.eq(consume_r_bin + do_read)
        m.d[self._r_domain] += consume_r_bin.eq(consume_r_nxt)

        # Gray-coded produce counter, synchronized into the read domain; Gray coding
        # ensures only one bit changes per increment, making the CDC safe.
        produce_w_gry = Signal(self._ctr_bits)
        produce_r_gry = Signal(self._ctr_bits)
        produce_enc = m.submodules.produce_enc = \
            GrayEncoder(self._ctr_bits)
        produce_cdc = m.submodules.produce_cdc = \
            FFSynchronizer(produce_w_gry, produce_r_gry, o_domain=self._r_domain)
        m.d.comb += produce_enc.i.eq(produce_w_nxt),
        m.d[self._w_domain] += produce_w_gry.eq(produce_enc.o)

        # Gray-coded consume counter, synchronized into the write domain.
        consume_r_gry = Signal(self._ctr_bits, reset_less=True)
        consume_w_gry = Signal(self._ctr_bits)
        consume_enc = m.submodules.consume_enc = \
            GrayEncoder(self._ctr_bits)
        consume_cdc = m.submodules.consume_cdc = \
            FFSynchronizer(consume_r_gry, consume_w_gry, o_domain=self._w_domain)
        m.d.comb += consume_enc.i.eq(consume_r_nxt)
        m.d[self._r_domain] += consume_r_gry.eq(consume_enc.o)

        # Decode the synchronized Gray counters back to binary for level computation.
        consume_w_bin = Signal(self._ctr_bits)
        consume_dec = m.submodules.consume_dec = \
            GrayDecoder(self._ctr_bits)
        m.d.comb += consume_dec.i.eq(consume_w_gry),
        m.d[self._w_domain] += consume_w_bin.eq(consume_dec.o)

        produce_r_bin = Signal(self._ctr_bits)
        produce_dec = m.submodules.produce_dec = \
            GrayDecoder(self._ctr_bits)
        m.d.comb += produce_dec.i.eq(produce_r_gry),
        m.d.comb += produce_r_bin.eq(produce_dec.o)

        # Full/empty detection on the Gray-coded pointers (Cummings' comparison:
        # full when the two MSBs differ and the rest match; empty on exact equality).
        w_full = Signal()
        r_empty = Signal()
        m.d.comb += [
            w_full.eq((produce_w_gry[-1] != consume_w_gry[-1]) &
                      (produce_w_gry[-2] != consume_w_gry[-2]) &
                      (produce_w_gry[:-2] == consume_w_gry[:-2])),
            r_empty.eq(consume_r_gry == produce_r_gry),
        ]

        m.d[self._w_domain] += self.w_level.eq((produce_w_bin - consume_w_bin))
        m.d.comb += self.r_level.eq((produce_r_bin - consume_r_bin))

        storage = Memory(width=self.width, depth=self.depth)
        w_port = m.submodules.w_port = storage.write_port(domain=self._w_domain)
        r_port = m.submodules.r_port = storage.read_port (domain=self._r_domain,
                                                          transparent=False)
        m.d.comb += [
            w_port.addr.eq(produce_w_bin[:-1]),
            w_port.data.eq(self.w_data),
            w_port.en.eq(do_write),
            self.w_rdy.eq(~w_full),
        ]
        m.d.comb += [
            r_port.addr.eq(consume_r_nxt[:-1]),
            self.r_data.eq(r_port.data),
            r_port.en.eq(1),
            self.r_rdy.eq(~r_empty),
        ]

        # Reset handling to maintain FIFO and CDC invariants in the presence of a write-domain
        # reset.
        # There is a CDC hazard associated with resetting an async FIFO - Gray code counters which
        # are reset to 0 violate their Gray code invariant. One way to handle this is to ensure
        # that both sides of the FIFO are asynchronously reset by the same signal. We adopt a
        # slight variation on this approach - reset control rests entirely with the write domain.
        # The write domain's reset signal is used to asynchronously reset the read domain's
        # counters and force the FIFO to be empty when the write domain's reset is asserted.
        # This requires the two read domain counters to be marked as "reset_less", as they are
        # reset through another mechanism. See https://github.com/nmigen/nmigen/issues/181 for the
        # full discussion.
        w_rst = ResetSignal(domain=self._w_domain, allow_reset_less=True)
        r_rst = Signal()

        # Async-set-sync-release synchronizer avoids CDC hazards
        rst_cdc = m.submodules.rst_cdc = \
            AsyncFFSynchronizer(w_rst, r_rst, o_domain=self._r_domain)

        # Decode Gray code counter synchronized from write domain to overwrite binary
        # counter in read domain.
        rst_dec = m.submodules.rst_dec = \
            GrayDecoder(self._ctr_bits)
        m.d.comb += rst_dec.i.eq(produce_r_gry)
        with m.If(r_rst):
            # While resetting, report empty and snap the consume pointers to the
            # (synchronized) produce pointers, emptying the queue.
            m.d.comb += r_empty.eq(1)
            m.d[self._r_domain] += consume_r_gry.eq(produce_r_gry)
            m.d[self._r_domain] += consume_r_bin.eq(rst_dec.o)
            m.d[self._r_domain] += self.r_rst.eq(1)
        with m.Else():
            m.d[self._r_domain] += self.r_rst.eq(0)

        # Formal: the Gray and binary counters must start consistent with each other.
        if platform == "formal":
            with m.If(Initial()):
                m.d.comb += Assume(produce_w_gry == (produce_w_bin ^ produce_w_bin[1:]))
                m.d.comb += Assume(consume_r_gry == (consume_r_bin ^ consume_r_bin[1:]))

        return m
|
||||
|
||||
|
||||
class AsyncFIFOBuffered(Elaboratable, FIFOInterface):
|
||||
__doc__ = FIFOInterface._doc_template.format(
|
||||
description="""
|
||||
Buffered asynchronous first in, first out queue.
|
||||
|
||||
Read and write interfaces are accessed from different clock domains, which can be set when
|
||||
constructing the FIFO.
|
||||
|
||||
:class:`AsyncFIFOBuffered` only supports power of 2 plus one depths. Unless ``exact_depth``
|
||||
is specified, the ``depth`` parameter is rounded up to the next power of 2 plus one.
|
||||
(The output buffer acts as an additional queue element.)
|
||||
|
||||
This queue's interface is identical to :class:`AsyncFIFO`, but it has an additional register
|
||||
on the output, improving timing in case of block RAM that has large clock-to-output delay.
|
||||
|
||||
In exchange, the latency between an entry being written to an empty queue and that entry
|
||||
becoming available on the output is increased by one cycle compared to :class:`AsyncFIFO`.
|
||||
""".strip(),
|
||||
parameters="""
|
||||
r_domain : str
|
||||
Read clock domain.
|
||||
w_domain : str
|
||||
Write clock domain.
|
||||
""".strip(),
|
||||
attributes="""
|
||||
fwft : bool
|
||||
Always set.
|
||||
""".strip(),
|
||||
r_data_valid="Valid if ``r_rdy`` is asserted.",
|
||||
r_attributes="""
|
||||
r_rst : Signal, out
|
||||
Asserted while the FIFO is being reset by the write-domain reset (for at least one
|
||||
read-domain clock cycle).
|
||||
""".strip(),
|
||||
w_attributes="")
|
||||
|
||||
def __init__(self, *, width, depth, r_domain="read", w_domain="write", exact_depth=False):
|
||||
if depth != 0:
|
||||
try:
|
||||
depth_bits = log2_int(max(0, depth - 1), need_pow2=exact_depth)
|
||||
depth = (1 << depth_bits) + 1
|
||||
except ValueError:
|
||||
raise ValueError("AsyncFIFOBuffered only supports depths that are one higher "
|
||||
"than powers of 2; requested exact depth {} is not"
|
||||
.format(depth)) from None
|
||||
super().__init__(width=width, depth=depth, fwft=True)
|
||||
|
||||
self.r_rst = Signal()
|
||||
self._r_domain = r_domain
|
||||
self._w_domain = w_domain
|
||||
|
||||
    def elaborate(self, platform):
        m = Module()
        if self.depth == 0:
            # Degenerate zero-depth FIFO: never ready for reads or writes.
            m.d.comb += [
                self.w_rdy.eq(0),
                self.r_rdy.eq(0),
            ]
            return m

        # The inner FIFO is one entry shallower; the output register below provides
        # the remaining queue slot.
        m.submodules.unbuffered = fifo = AsyncFIFO(width=self.width, depth=self.depth - 1,
                                                   r_domain=self._r_domain, w_domain=self._w_domain)

        # Write side passes straight through to the inner FIFO.
        m.d.comb += [
            fifo.w_data.eq(self.w_data),
            self.w_rdy.eq(fifo.w_rdy),
            fifo.w_en.eq(self.w_en),
        ]

        # 1 while the output register holds an entry that is not being consumed this
        # cycle; `(r_rdy - r_en) & r_rdy` evaluates to r_rdy & ~r_en for 1-bit signals.
        r_consume_buffered = Signal()
        m.d.comb += r_consume_buffered.eq((self.r_rdy - self.r_en) & self.r_rdy)
        m.d[self._r_domain] += self.r_level.eq(fifo.r_level + r_consume_buffered)

        # Synchronize the "buffered entry held" flag into the write domain so that
        # w_level also accounts for the output register.
        w_consume_buffered = Signal()
        m.submodules.consume_buffered_cdc = FFSynchronizer(r_consume_buffered, w_consume_buffered, o_domain=self._w_domain, stages=4)
        m.d.comb += self.w_level.eq(fifo.w_level + w_consume_buffered)

        # Refill the output register whenever it is consumed or empty.
        with m.If(self.r_en | ~self.r_rdy):
            m.d[self._r_domain] += [
                self.r_data.eq(fifo.r_data),
                self.r_rdy.eq(fifo.r_rdy),
                self.r_rst.eq(fifo.r_rst),
            ]
            m.d.comb += [
                fifo.r_en.eq(1)
            ]

        return m
|
||||
# Deprecation shim: this module only re-exports amaranth.lib.fifo under the old
# nmigen name, and warns any importer once at import time.
import warnings
warnings.warn("instead of nmigen.lib.fifo, use amaranth.lib.fifo",
              DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
119
nmigen/lib/io.py
119
nmigen/lib/io.py
|
|
@ -1,116 +1,7 @@
|
|||
from .. import *
|
||||
from ..hdl.rec import *
|
||||
from amaranth.lib.io import *
|
||||
from amaranth.lib.io import __all__
|
||||
|
||||
|
||||
__all__ = ["pin_layout", "Pin"]
|
||||
|
||||
|
||||
def pin_layout(width, dir, xdr=0):
    """
    Layout of the platform interface of a pin or several pins, which may be used inside
    user-defined records.

    See :class:`Pin` for details.
    """
    # Validate all parameters before assembling any fields.
    if not isinstance(width, int) or width < 1:
        raise TypeError("Width must be a positive integer, not {!r}"
                        .format(width))
    if dir not in ("i", "o", "oe", "io"):
        raise TypeError("Direction must be one of \"i\", \"o\", \"io\", or \"oe\", not {!r}"
                        .format(dir))
    if not isinstance(xdr, int) or xdr < 0:
        raise TypeError("Gearing ratio must be a non-negative integer, not {!r}"
                        .format(xdr))

    fields = []
    # The input group ("i*") and output group ("o*") have identical structure;
    # build each one with the same code, gated on the requested direction.
    for prefix, present in (("i", dir in ("i", "io")),
                            ("o", dir in ("o", "oe", "io"))):
        if not present:
            continue
        if xdr > 0:
            # Geared buffers are clocked; very high gearing adds a fast clock.
            fields.append((prefix + "_clk", 1))
        if xdr > 2:
            fields.append((prefix + "_fclk", 1))
        if xdr in (0, 1):
            # Combinatorial or SDR: a single ungeared data field.
            fields.append((prefix, width))
        else:
            # DDR and above: one data field per gearbox phase.
            fields.extend(("{}{}".format(prefix, phase), width) for phase in range(xdr))
    if dir in ("oe", "io"):
        fields.append(("oe", 1))
    return Layout(fields)
|
||||
|
||||
|
||||
class Pin(Record):
    """
    An interface to an I/O buffer or a group of them that provides uniform access to input, output,
    or tristate buffers that may include a 1:n gearbox. (A 1:2 gearbox is typically called "DDR".)

    A :class:`Pin` is identical to a :class:`Record` that uses the corresponding :meth:`pin_layout`
    except that it allows accessing the parameters like ``width`` as attributes. It is legal to use
    a plain :class:`Record` anywhere a :class:`Pin` is used, provided that these attributes are
    not necessary.

    Parameters
    ----------
    width : int
        Width of the ``i``/``iN`` and ``o``/``oN`` signals.
    dir : ``"i"``, ``"o"``, ``"io"``, ``"oe"``
        Direction of the buffers. If ``"i"`` is specified, only the ``i``/``iN`` signals are
        present. If ``"o"`` is specified, only the ``o``/``oN`` signals are present. If ``"oe"`` is
        specified, the ``o``/``oN`` signals are present, and an ``oe`` signal is present.
        If ``"io"`` is specified, both the ``i``/``iN`` and ``o``/``oN`` signals are present, and
        an ``oe`` signal is present.
    xdr : int
        Gearbox ratio. If equal to 0, the I/O buffer is combinatorial, and only ``i``/``o``
        signals are present. If equal to 1, the I/O buffer is SDR, and only ``i``/``o`` signals are
        present. If greater than 1, the I/O buffer includes a gearbox, and ``iN``/``oN`` signals
        are present instead, where ``n in range(0, xdr)``. For example, if ``xdr=2``, the I/O buffer
        is DDR; the signal ``i0`` reflects the value at the rising edge, and the signal ``i1``
        reflects the value at the falling edge.
    name : str
        Name of the underlying record.

    Attributes
    ----------
    i_clk:
        I/O buffer input clock. Synchronizes `i*`. Present if ``xdr`` is nonzero.
    i_fclk:
        I/O buffer input fast clock. Synchronizes `i*` on higher gearbox ratios. Present if ``xdr``
        is greater than 2.
    i : Signal, out
        I/O buffer input, without gearing. Present if ``dir="i"`` or ``dir="io"``, and ``xdr`` is
        equal to 0 or 1.
    i0, i1, ... : Signal, out
        I/O buffer inputs, with gearing. Present if ``dir="i"`` or ``dir="io"``, and ``xdr`` is
        greater than 1.
    o_clk:
        I/O buffer output clock. Synchronizes `o*`, including `oe`. Present if ``xdr`` is nonzero.
    o_fclk:
        I/O buffer output fast clock. Synchronizes `o*` on higher gearbox ratios. Present if
        ``xdr`` is greater than 2.
    o : Signal, in
        I/O buffer output, without gearing. Present if ``dir="o"`` or ``dir="io"``, and ``xdr`` is
        equal to 0 or 1.
    o0, o1, ... : Signal, in
        I/O buffer outputs, with gearing. Present if ``dir="o"`` or ``dir="io"``, and ``xdr`` is
        greater than 1.
    oe : Signal, in
        I/O buffer output enable. Present if ``dir="io"`` or ``dir="oe"``. Buffers generally
        cannot change direction more than once per cycle, so at most one output enable signal
        is present.
    """
    def __init__(self, width, dir, *, xdr=0, name=None, src_loc_at=0):
        # Kept as plain attributes so that the parameters remain introspectable on
        # the constructed record; pin_layout() re-validates them.
        self.width = width
        self.dir = dir
        self.xdr = xdr

        super().__init__(pin_layout(self.width, self.dir, self.xdr),
                         name=name, src_loc_at=src_loc_at + 1)
|
||||
# Deprecation shim: this module only re-exports amaranth.lib.io under the old
# nmigen name, and warns any importer once at import time.
import warnings
warnings.warn("instead of nmigen.lib.io, use amaranth.lib.io",
              DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,60 +1,7 @@
|
|||
from .. import *
|
||||
from amaranth.lib.scheduler import *
|
||||
from amaranth.lib.scheduler import __all__
|
||||
|
||||
|
||||
__all__ = ["RoundRobin"]
|
||||
|
||||
|
||||
class RoundRobin(Elaboratable):
    """Round-robin scheduler.

    For a given set of requests, the round-robin scheduler will
    grant one request. Once it grants a request, if any other
    requests are active, it grants the next active request with
    a greater number, restarting from zero once it reaches the
    highest one.

    Use :class:`EnableInserter` to control when the scheduler
    is updated.

    Parameters
    ----------
    count : int
        Number of requests.

    Attributes
    ----------
    requests : Signal(count), in
        Set of requests.
    grant : Signal(range(count)), out
        Number of the granted request. Does not change if there are no
        active requests.
    valid : Signal(), out
        Asserted if grant corresponds to an active request. Deasserted
        otherwise, i.e. if no requests are active.
    """
    def __init__(self, *, count):
        if not isinstance(count, int) or count < 0:
            raise ValueError("Count must be a non-negative integer, not {!r}"
                             .format(count))
        self.count = count

        self.requests = Signal(count)
        self.grant = Signal(range(count))
        self.valid = Signal()

    def elaborate(self, platform):
        m = Module()

        # Priority is encoded purely by statement order: in Amaranth/nMigen, a later
        # conditional assignment to the same signal overrides an earlier one. For each
        # current grant i, the predecessor assignments come first (iterated in reverse,
        # so the lowest-numbered active predecessor wins among them), and the successor
        # assignments come last (also in reverse, so the nearest higher-numbered active
        # request wins overall). Do not reorder these loops.
        with m.Switch(self.grant):
            for i in range(self.count):
                with m.Case(i):
                    for pred in reversed(range(i)):
                        with m.If(self.requests[pred]):
                            m.d.sync += self.grant.eq(pred)
                    for succ in reversed(range(i + 1, self.count)):
                        with m.If(self.requests[succ]):
                            m.d.sync += self.grant.eq(succ)

        # `valid` lags `requests` by one cycle, matching the registered grant.
        m.d.sync += self.valid.eq(self.requests.any())

        return m
|
||||
# Deprecation shim: this module only re-exports amaranth.lib.scheduler under the
# old nmigen name, and warns any importer once at import time.
import warnings
warnings.warn("instead of nmigen.lib.scheduler, use amaranth.lib.scheduler",
              DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
114
nmigen/rpc.py
114
nmigen/rpc.py
|
|
@ -1,111 +1,7 @@
|
|||
import sys
|
||||
import json
|
||||
import argparse
|
||||
import importlib
|
||||
|
||||
from .hdl import Signal, Record, Elaboratable
|
||||
from .back import rtlil
|
||||
from amaranth.rpc import *
|
||||
from amaranth.rpc import __all__
|
||||
|
||||
|
||||
__all__ = ["main"]
|
||||
|
||||
|
||||
def _collect_modules(names):
|
||||
modules = {}
|
||||
for name in names:
|
||||
py_module_name, py_class_name = name.rsplit(".", 1)
|
||||
py_module = importlib.import_module(py_module_name)
|
||||
if py_class_name == "*":
|
||||
for py_class_name in py_module.__all__:
|
||||
py_class = py_module.__dict__[py_class_name]
|
||||
if not issubclass(py_class, Elaboratable):
|
||||
continue
|
||||
modules["{}.{}".format(py_module_name, py_class_name)] = py_class
|
||||
else:
|
||||
py_class = py_module.__dict__[py_class_name]
|
||||
if not isinstance(py_class, type) or not issubclass(py_class, Elaboratable):
|
||||
raise TypeError("{}.{} is not a class inheriting from Elaboratable"
|
||||
.format(py_module_name, py_class_name))
|
||||
modules[name] = py_class
|
||||
return modules
|
||||
|
||||
|
||||
def _serve_yosys(modules):
    """Serve the Yosys JSON-over-stdio RPC protocol until stdin is closed.

    Reads one JSON request per line from stdin and writes one JSON response per
    line to stdout. `modules` maps fully qualified names to Elaboratable classes.
    """
    while True:
        request_json = sys.stdin.readline()
        # EOF: the synthesis tool closed the pipe; stop serving.
        if not request_json: break
        request = json.loads(request_json)

        if request["method"] == "modules":
            response = {"modules": list(modules.keys())}

        elif request["method"] == "derive":
            module_name = request["module"]

            # Translate Yosys parameters into positional ($N) and keyword (\name)
            # constructor arguments.
            args, kwargs = [], {}
            for parameter_name, parameter in request["parameters"].items():
                if parameter["type"] == "unsigned":
                    parameter_value = int(parameter["value"], 2)
                elif parameter["type"] == "signed":
                    # Two's complement decode of the binary string.
                    width = len(parameter["value"])
                    parameter_value = int(parameter["value"], 2)
                    if parameter_value & (1 << (width - 1)):
                        parameter_value = -((1 << width) - parameter_value)
                elif parameter["type"] == "string":
                    parameter_value = parameter["value"]
                elif parameter["type"] == "real":
                    parameter_value = float(parameter["value"])
                else:
                    # NOTE(review): the message says "type" but formats the parameter
                    # *name*; the unknown type string is not reported — confirm intent.
                    raise NotImplementedError("Unrecognized parameter type {}"
                                              .format(parameter_name))
                if parameter_name.startswith("$"):
                    index = int(parameter_name[1:])
                    # Grow the positional list with placeholders up to the index.
                    while len(args) < index:
                        args.append(None)
                    args[index] = parameter_value
                if parameter_name.startswith("\\"):
                    kwargs[parameter_name[1:]] = parameter_value

            try:
                elaboratable = modules[module_name](*args, **kwargs)
                ports = []
                # By convention, any public attribute that is a Signal or a Record is
                # considered a port.
                for port_name, port in vars(elaboratable).items():
                    if not port_name.startswith("_") and isinstance(port, (Signal, Record)):
                        ports += port._lhs_signals()
                rtlil_text = rtlil.convert(elaboratable, name=module_name, ports=ports)
                response = {"frontend": "ilang", "source": rtlil_text}
            except Exception as error:
                # Elaboration errors are reported to the tool, not raised here.
                response = {"error": "{}: {}".format(type(error).__name__, str(error))}

        else:
            # NOTE(review): this `return` discards the error dict and ends the serve
            # loop without replying — presumably it was meant to be sent as a
            # response like the branches above; confirm against the protocol.
            return {"error": "Unrecognized method {!r}".format(request["method"])}

        sys.stdout.write(json.dumps(response))
        sys.stdout.write("\n")
        sys.stdout.flush()
|
||||
|
||||
|
||||
def main():
    """Command-line entry point: parse arguments and serve the requested protocol."""
    parser = argparse.ArgumentParser(description=r"""
    The nMigen RPC server allows a HDL synthesis program to request an nMigen module to
    be elaborated on demand using the parameters it provides. For example, using Yosys together
    with the nMigen RPC server allows instantiating parametric nMigen modules directly
    from Verilog.
    """)
    # Shared MODULE argument; factored out so future protocols can reuse it.
    def add_modules_arg(parser):
        parser.add_argument("modules", metavar="MODULE", type=str, nargs="+",
                            help="import and provide MODULES")
    protocols = parser.add_subparsers(metavar="PROTOCOL", dest="protocol", required=True)
    protocol_yosys = protocols.add_parser("yosys", help="use Yosys JSON-based RPC protocol")
    add_modules_arg(protocol_yosys)

    args = parser.parse_args()
    modules = _collect_modules(args.modules)
    if args.protocol == "yosys":
        _serve_yosys(modules)
|
||||
|
||||
|
||||
# Allow running the RPC server directly as a script.
if __name__ == "__main__":
    main()
|
||||
# Deprecation shim: this module only re-exports amaranth.rpc under the old
# nmigen name, and warns any importer once at import time.
import warnings
warnings.warn("instead of nmigen.rpc, use amaranth.rpc",
              DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,4 +1,7 @@
|
|||
from .core import *
|
||||
from amaranth.sim import *
|
||||
from amaranth.sim import __all__
|
||||
|
||||
|
||||
__all__ = ["Settle", "Delay", "Tick", "Passive", "Active", "Simulator"]
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.sim, use amaranth.sim",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,67 +0,0 @@
|
|||
__all__ = ["BaseProcess", "BaseSignalState", "BaseSimulation", "BaseEngine"]
|
||||
|
||||
|
||||
class BaseProcess:
    """Abstract base for simulation processes.

    Concrete subclasses provide storage for `runnable` and `passive` (this base
    declares empty __slots__) and implement `run`.
    """
    __slots__ = ()

    def __init__(self):
        self.reset()

    def reset(self):
        """Return the process to its initial state: idle and passive."""
        # A passive process does not keep the simulation alive by itself.
        self.runnable, self.passive = False, True

    def run(self):
        """Execute one step of the process. Must be overridden."""
        raise NotImplementedError
|
||||
|
||||
|
||||
class BaseSignalState:
    """Abstract per-signal simulation state: current/next value plus an update hook."""
    __slots__ = ()

    # The Signal this state belongs to; set by concrete subclasses.
    signal = NotImplemented

    # Committed value and pending (next-delta) value of the signal.
    curr = NotImplemented
    next = NotImplemented

    def set(self, value):
        """Schedule *value* as the signal's next value. Must be overridden."""
        raise NotImplementedError
|
||||
|
||||
|
||||
class BaseSimulation:
    """Abstract interface through which processes observe and schedule on signals."""
    def reset(self):
        """Return all simulation state to its initial value. Must be overridden."""
        raise NotImplementedError

    def get_signal(self, signal):
        """Return the slot index allocated for *signal*. Must be overridden."""
        raise NotImplementedError

    # Sequence of per-signal state objects, indexed by the slot from get_signal().
    slots = NotImplemented

    def add_trigger(self, process, signal, *, trigger=None):
        """Wake *process* when *signal* changes (or reaches *trigger*). Must be overridden."""
        raise NotImplementedError

    def remove_trigger(self, process, signal):
        """Remove a trigger previously added with add_trigger(). Must be overridden."""
        raise NotImplementedError

    def wait_interval(self, process, interval):
        """Schedule *process* to wake after *interval*. Must be overridden."""
        raise NotImplementedError
|
||||
|
||||
|
||||
class BaseEngine:
    """Abstract simulation engine driving processes and producing traces."""
    def add_coroutine_process(self, process, *, default_cmd):
        """Register a generator-based testbench process. Must be overridden."""
        raise NotImplementedError

    def add_clock_process(self, clock, *, phase, period):
        """Register a process toggling *clock* with the given phase/period. Must be overridden."""
        raise NotImplementedError

    def reset(self):
        """Return the engine and all processes to their initial state. Must be overridden."""
        raise NotImplementedError

    @property
    def now(self):
        """Current simulation time. Must be overridden."""
        raise NotImplementedError

    def advance(self):
        """Run the simulation forward by one step. Must be overridden."""
        raise NotImplementedError

    def write_vcd(self, *, vcd_file, gtkw_file, traces):
        """Write VCD/GTKWave output for the given traces. Must be overridden."""
        raise NotImplementedError
|
||||
|
|
@ -1,34 +0,0 @@
|
|||
from ._base import BaseProcess
|
||||
|
||||
|
||||
__all__ = ["PyClockProcess"]
|
||||
|
||||
|
||||
class PyClockProcess(BaseProcess):
    """Simulation process that toggles a 1-bit clock signal forever.

    The first run waits for `phase`; every subsequent run inverts the clock and
    waits half a `period`.
    """
    def __init__(self, state, signal, *, phase, period):
        # Only a single-bit signal can act as a clock.
        assert len(signal) == 1

        self.state = state
        self.slot = self.state.get_signal(signal)
        self.phase = phase
        self.period = period

        self.reset()

    def reset(self):
        # Runnable immediately so the initial phase delay gets scheduled;
        # passive because a clock alone should not keep the simulation alive.
        self.runnable = True
        self.passive = True

        self.initial = True

    def run(self):
        self.runnable = False

        if self.initial:
            # First activation: align the first edge to the requested phase.
            self.initial = False
            self.state.wait_interval(self, self.phase)

        else:
            # Toggle the clock and schedule the next half-period.
            clk_state = self.state.slots[self.slot]
            clk_state.set(not clk_state.curr)
            self.state.wait_interval(self, self.period / 2)
|
||||
|
|
@ -1,123 +0,0 @@
|
|||
import inspect
|
||||
|
||||
from ..hdl import *
|
||||
from ..hdl.ast import Statement, SignalSet
|
||||
from .core import Tick, Settle, Delay, Passive, Active
|
||||
from ._base import BaseProcess
|
||||
from ._pyrtl import _ValueCompiler, _RHSValueCompiler, _StatementCompiler
|
||||
|
||||
|
||||
__all__ = ["PyCoroProcess"]
|
||||
|
||||
|
||||
class PyCoroProcess(BaseProcess):
    """Simulation process driven by a user generator (testbench coroutine).

    Each yielded command (Value read, Statement, Tick/Settle/Delay, Passive/Active)
    is interpreted against the simulation state; values are read back by executing
    compiled Python snippets in `exec_locals`.
    """
    def __init__(self, state, domains, constructor, *, default_cmd=None):
        self.state = state
        self.domains = domains
        # Stored as a constructor (not a generator) so reset() can restart it.
        self.constructor = constructor
        self.default_cmd = default_cmd

        self.reset()

    def reset(self):
        self.runnable = True
        self.passive = False

        self.coroutine = self.constructor()
        # Namespace for exec()ing compiled value/statement code; "slots" and the
        # helper lambdas are referenced by the generated snippets.
        self.exec_locals = {
            "slots": self.state.slots,
            "result": None,
            **_ValueCompiler.helpers
        }
        self.waits_on = SignalSet()

    def src_loc(self):
        """Best-effort "file:line" of the coroutine's current suspension point."""
        coroutine = self.coroutine
        if coroutine is None:
            return None
        # Walk down `yield from` chains to the innermost generator.
        while coroutine.gi_yieldfrom is not None and inspect.isgenerator(coroutine.gi_yieldfrom):
            coroutine = coroutine.gi_yieldfrom
        if inspect.isgenerator(coroutine):
            frame = coroutine.gi_frame
        if inspect.iscoroutine(coroutine):
            frame = coroutine.cr_frame
        return "{}:{}".format(inspect.getfile(frame), inspect.getlineno(frame))

    def add_trigger(self, signal, trigger=None):
        self.state.add_trigger(self, signal, trigger=trigger)
        self.waits_on.add(signal)

    def clear_triggers(self):
        # Drop all pending wakeups before interpreting the next command batch.
        for signal in self.waits_on:
            self.state.remove_trigger(self, signal)
        self.waits_on.clear()

    def run(self):
        if self.coroutine is None:
            return

        self.clear_triggers()

        response = None
        # Keep feeding responses into the coroutine until it suspends on a
        # time/edge wait (Tick/Settle/Delay) or finishes.
        while True:
            try:
                command = self.coroutine.send(response)
                if command is None:
                    command = self.default_cmd
                response = None

                if isinstance(command, Value):
                    # Read: compile the value to Python, execute, normalize result.
                    exec(_RHSValueCompiler.compile(self.state, command, mode="curr"),
                        self.exec_locals)
                    response = Const.normalize(self.exec_locals["result"], command.shape())

                elif isinstance(command, Statement):
                    # Write: compile and execute the assignment/switch.
                    exec(_StatementCompiler.compile(self.state, command),
                        self.exec_locals)

                elif type(command) is Tick:
                    domain = command.domain
                    if isinstance(domain, ClockDomain):
                        pass
                    elif domain in self.domains:
                        domain = self.domains[domain]
                    else:
                        raise NameError("Received command {!r} that refers to a nonexistent "
                                        "domain {!r} from process {!r}"
                                        .format(command, command.domain, self.src_loc()))
                    # Wake on the active clock edge; an async reset also wakes us.
                    self.add_trigger(domain.clk, trigger=1 if domain.clk_edge == "pos" else 0)
                    if domain.rst is not None and domain.async_reset:
                        self.add_trigger(domain.rst, trigger=1)
                    return

                elif type(command) is Settle:
                    # A None interval means "after the current delta cycle settles".
                    self.state.wait_interval(self, None)
                    return

                elif type(command) is Delay:
                    self.state.wait_interval(self, command.interval)
                    return

                elif type(command) is Passive:
                    self.passive = True

                elif type(command) is Active:
                    self.passive = False

                elif command is None: # only possible if self.default_cmd is None
                    raise TypeError("Received default command from process {!r} that was added "
                                    "with add_process(); did you mean to add this process with "
                                    "add_sync_process() instead?"
                                    .format(self.src_loc()))

                else:
                    raise TypeError("Received unsupported command {!r} from process {!r}"
                                    .format(command, self.src_loc()))

            except StopIteration:
                # Coroutine finished normally; it no longer participates.
                self.passive = True
                self.coroutine = None
                return

            except Exception as exn:
                # Re-inject errors at the coroutine's yield point so its traceback
                # points at the testbench. NOTE(review): if the coroutine does not
                # handle the thrown exception it propagates from here; confirm that
                # is the intended failure mode.
                self.coroutine.throw(exn)
|
||||
|
|
@ -1,451 +0,0 @@
|
|||
import os
|
||||
import tempfile
|
||||
from contextlib import contextmanager
|
||||
|
||||
from ..hdl import *
|
||||
from ..hdl.ast import SignalSet
|
||||
from ..hdl.xfrm import ValueVisitor, StatementVisitor, LHSGroupFilter
|
||||
from ._base import BaseProcess
|
||||
|
||||
|
||||
__all__ = ["PyRTLProcess"]
|
||||
|
||||
|
||||
class PyRTLProcess(BaseProcess):
    """Simulation process whose behavior comes from compiled RTL statements.

    The `run` slot is filled in later with a compiled callable; this class only
    carries scheduling state.
    """
    __slots__ = ("is_comb", "runnable", "passive", "run")

    def __init__(self, *, is_comb):
        self.is_comb = is_comb
        self.reset()

    def reset(self):
        """Restore initial scheduling state."""
        # Combinational processes must run once at startup to settle their
        # outputs; clocked ones wait for an edge. RTL processes never keep the
        # simulation alive on their own, hence always passive.
        self.runnable, self.passive = self.is_comb, True
|
||||
|
||||
|
||||
class _PythonEmitter:
|
||||
def __init__(self):
|
||||
self._buffer = []
|
||||
self._suffix = 0
|
||||
self._level = 0
|
||||
|
||||
def append(self, code):
|
||||
self._buffer.append(" " * self._level)
|
||||
self._buffer.append(code)
|
||||
self._buffer.append("\n")
|
||||
|
||||
@contextmanager
|
||||
def indent(self):
|
||||
self._level += 1
|
||||
yield
|
||||
self._level -= 1
|
||||
|
||||
def flush(self, indent=""):
|
||||
code = "".join(self._buffer)
|
||||
self._buffer.clear()
|
||||
return code
|
||||
|
||||
def gen_var(self, prefix):
|
||||
name = f"{prefix}_{self._suffix}"
|
||||
self._suffix += 1
|
||||
return name
|
||||
|
||||
def def_var(self, prefix, value):
|
||||
name = self.gen_var(prefix)
|
||||
self.append(f"{name} = {value}")
|
||||
return name
|
||||
|
||||
|
||||
class _Compiler:
|
||||
def __init__(self, state, emitter):
|
||||
self.state = state
|
||||
self.emitter = emitter
|
||||
|
||||
|
||||
class _ValueCompiler(ValueVisitor, _Compiler):
    """Common base for RHS/LHS value compilers.

    `helpers` is injected into the namespace the generated code executes in.
    """
    helpers = {
        # Sign-extend: `sign` is the negative weight of the MSB (-1 << (n - 1));
        # parsed as `(value | sign) if (value & sign) else value`.
        "sign": lambda value, sign: value | sign if value & sign else value,
        # Division/modulo that yield 0 on a zero divisor instead of raising,
        # matching hardware semantics.
        "zdiv": lambda lhs, rhs: 0 if rhs == 0 else lhs // rhs,
        "zmod": lambda lhs, rhs: 0 if rhs == 0 else lhs % rhs,
    }

    # The following value kinds are lowered away before compilation and must
    # never reach this visitor.
    def on_ClockSignal(self, value):
        raise NotImplementedError # :nocov:

    def on_ResetSignal(self, value):
        raise NotImplementedError # :nocov:

    def on_AnyConst(self, value):
        raise NotImplementedError # :nocov:

    def on_AnySeq(self, value):
        raise NotImplementedError # :nocov:

    def on_Sample(self, value):
        raise NotImplementedError # :nocov:

    def on_Initial(self, value):
        raise NotImplementedError # :nocov:
|
||||
|
||||
|
||||
class _RHSValueCompiler(_ValueCompiler):
    """Compiles a right-hand-side Value into a Python expression string that
    reads from the simulation's signal slots."""
    def __init__(self, state, emitter, *, mode, inputs=None):
        super().__init__(state, emitter)
        assert mode in ("curr", "next")
        # "curr" reads committed signal values; "next" reads pending ones
        # (used for the read part of read-modify-write on the LHS).
        self.mode = mode
        # If not None, `inputs` gets populated with RHS signals.
        self.inputs = inputs

    def on_Const(self, value):
        return f"{value.value}"

    def on_Signal(self, value):
        if self.inputs is not None:
            self.inputs.add(value)

        if self.mode == "curr":
            return f"slots[{self.state.get_signal(value)}].{self.mode}"
        else:
            # "next" values live in local variables set up by _StatementCompiler.
            return f"next_{self.state.get_signal(value)}"

    def on_Operator(self, value):
        # mask() truncates an operand to its width; sign() additionally
        # sign-extends signed operands via the `sign` helper.
        def mask(value):
            value_mask = (1 << len(value)) - 1
            return f"({value_mask} & {self(value)})"

        def sign(value):
            if value.shape().signed:
                return f"sign({mask(value)}, {-1 << (len(value) - 1)})"
            else: # unsigned
                return mask(value)

        if len(value.operands) == 1:
            arg, = value.operands
            if value.operator == "~":
                return f"(~{self(arg)})"
            if value.operator == "-":
                return f"(-{sign(arg)})"
            if value.operator == "b":
                return f"bool({mask(arg)})"
            if value.operator == "r|":
                return f"(0 != {mask(arg)})"
            if value.operator == "r&":
                return f"({(1 << len(arg)) - 1} == {mask(arg)})"
            if value.operator == "r^":
                # Believe it or not, this is the fastest way to compute a sideways XOR in Python.
                return f"(format({mask(arg)}, 'b').count('1') % 2)"
            if value.operator in ("u", "s"):
                # These operators don't change the bit pattern, only its interpretation.
                return self(arg)
        elif len(value.operands) == 2:
            lhs, rhs = value.operands
            if value.operator == "+":
                return f"({sign(lhs)} + {sign(rhs)})"
            if value.operator == "-":
                return f"({sign(lhs)} - {sign(rhs)})"
            if value.operator == "*":
                return f"({sign(lhs)} * {sign(rhs)})"
            if value.operator == "//":
                return f"zdiv({sign(lhs)}, {sign(rhs)})"
            if value.operator == "%":
                return f"zmod({sign(lhs)}, {sign(rhs)})"
            if value.operator == "&":
                return f"({self(lhs)} & {self(rhs)})"
            if value.operator == "|":
                return f"({self(lhs)} | {self(rhs)})"
            if value.operator == "^":
                return f"({self(lhs)} ^ {self(rhs)})"
            if value.operator == "<<":
                return f"({sign(lhs)} << {sign(rhs)})"
            if value.operator == ">>":
                return f"({sign(lhs)} >> {sign(rhs)})"
            if value.operator == "==":
                return f"({sign(lhs)} == {sign(rhs)})"
            if value.operator == "!=":
                return f"({sign(lhs)} != {sign(rhs)})"
            if value.operator == "<":
                return f"({sign(lhs)} < {sign(rhs)})"
            if value.operator == "<=":
                return f"({sign(lhs)} <= {sign(rhs)})"
            if value.operator == ">":
                return f"({sign(lhs)} > {sign(rhs)})"
            if value.operator == ">=":
                return f"({sign(lhs)} >= {sign(rhs)})"
        elif len(value.operands) == 3:
            if value.operator == "m":
                # Mux: selector truthiness picks between the two values.
                sel, val1, val0 = value.operands
                return f"({self(val1)} if {mask(sel)} else {self(val0)})"
        raise NotImplementedError("Operator '{}' not implemented".format(value.operator)) # :nocov:

    def on_Slice(self, value):
        # Shift down to the start bit, then mask to the slice width.
        return f"({(1 << len(value)) - 1} & ({self(value.value)} >> {value.start}))"

    def on_Part(self, value):
        # Dynamic slice: the bit offset is stride * (masked offset value).
        offset_mask = (1 << len(value.offset)) - 1
        offset = f"({value.stride} * ({offset_mask} & {self(value.offset)}))"
        return f"({(1 << value.width) - 1} & " \
               f"{self(value.value)} >> {offset})"

    def on_Cat(self, value):
        # OR together each part, masked to its width and shifted to its position.
        gen_parts = []
        offset = 0
        for part in value.parts:
            part_mask = (1 << len(part)) - 1
            gen_parts.append(f"(({part_mask} & {self(part)}) << {offset})")
            offset += len(part)
        if gen_parts:
            return f"({' | '.join(gen_parts)})"
        return f"0"

    def on_Repl(self, value):
        # Evaluate the replicated value once into a temporary, then OR shifted copies.
        part_mask = (1 << len(value.value)) - 1
        gen_part = self.emitter.def_var("repl", f"{part_mask} & {self(value.value)}")
        gen_parts = []
        offset = 0
        for _ in range(value.count):
            gen_parts.append(f"({gen_part} << {offset})")
            offset += len(value.value)
        if gen_parts:
            return f"({' | '.join(gen_parts)})"
        return f"0"

    def on_ArrayProxy(self, value):
        # Emit an if/elif chain over the index; an out-of-range index selects the
        # last element (the else branch).
        index_mask = (1 << len(value.index)) - 1
        gen_index = self.emitter.def_var("rhs_index", f"{index_mask} & {self(value.index)}")
        gen_value = self.emitter.gen_var("rhs_proxy")
        if value.elems:
            for index, elem in enumerate(value.elems):
                if index == 0:
                    self.emitter.append(f"if {index} == {gen_index}:")
                else:
                    self.emitter.append(f"elif {index} == {gen_index}:")
                with self.emitter.indent():
                    self.emitter.append(f"{gen_value} = {self(elem)}")
            self.emitter.append(f"else:")
            with self.emitter.indent():
                self.emitter.append(f"{gen_value} = {self(value.elems[-1])}")
            return gen_value
        else:
            return f"0"

    @classmethod
    def compile(cls, state, value, *, mode):
        """Return Python source that evaluates *value* into a `result` variable."""
        emitter = _PythonEmitter()
        compiler = cls(state, emitter, mode=mode)
        emitter.append(f"result = {compiler(value)}")
        return emitter.flush()
|
||||
|
||||
|
||||
class _LHSValueCompiler(_ValueCompiler):
    """Compiles a left-hand-side Value into a generator of assignment code.

    Visiting a value returns `gen(arg)`: a function that, given a Python
    expression string for the RHS, emits code assigning it to the lvalue.
    """
    def __init__(self, state, emitter, *, rhs, outputs=None):
        super().__init__(state, emitter)
        # `rrhs` is used to translate rvalues that are syntactically a part of an lvalue, e.g.
        # the offset of a Part.
        self.rrhs = rhs
        # `lrhs` is used to translate the read part of a read-modify-write cycle during partial
        # update of an lvalue.
        self.lrhs = _RHSValueCompiler(state, emitter, mode="next", inputs=None)
        # If not None, `outputs` gets populated with signals on LHS.
        self.outputs = outputs

    def on_Const(self, value):
        raise TypeError # :nocov:

    def on_Signal(self, value):
        if self.outputs is not None:
            self.outputs.add(value)

        def gen(arg):
            # Truncate (and sign-extend, if signed) before storing into the
            # signal's pending `next_<slot>` local.
            value_mask = (1 << len(value)) - 1
            if value.shape().signed:
                value_sign = f"sign({value_mask} & {arg}, {-1 << (len(value) - 1)})"
            else: # unsigned
                value_sign = f"{value_mask} & {arg}"
            self.emitter.append(f"next_{self.state.get_signal(value)} = {value_sign}")
        return gen

    def on_Operator(self, value):
        raise TypeError # :nocov:

    def on_Slice(self, value):
        def gen(arg):
            # Read-modify-write: clear the sliced bits, OR in the new ones.
            width_mask = (1 << (value.stop - value.start)) - 1
            self(value.value)(f"({self.lrhs(value.value)} & " \
                f"{~(width_mask << value.start)} | " \
                f"(({width_mask} & {arg}) << {value.start}))")
        return gen

    def on_Part(self, value):
        def gen(arg):
            # Same read-modify-write as on_Slice, but at a dynamic bit offset.
            width_mask = (1 << value.width) - 1
            offset_mask = (1 << len(value.offset)) - 1
            offset = f"({value.stride} * ({offset_mask} & {self.rrhs(value.offset)}))"
            self(value.value)(f"({self.lrhs(value.value)} & " \
                f"~({width_mask} << {offset}) | " \
                f"(({width_mask} & {arg}) << {offset}))")
        return gen

    def on_Cat(self, value):
        def gen(arg):
            # Evaluate the RHS once, then assign each concatenated part its field.
            gen_arg = self.emitter.def_var("cat", arg)
            offset = 0
            for part in value.parts:
                part_mask = (1 << len(part)) - 1
                self(part)(f"({part_mask} & ({gen_arg} >> {offset}))")
                offset += len(part)
        return gen

    def on_Repl(self, value):
        raise TypeError # :nocov:

    def on_ArrayProxy(self, value):
        def gen(arg):
            # if/elif chain over the index; out-of-range falls through to the
            # last element, mirroring the RHS proxy behavior.
            index_mask = (1 << len(value.index)) - 1
            gen_index = self.emitter.def_var("index", f"{self.rrhs(value.index)} & {index_mask}")
            if value.elems:
                for index, elem in enumerate(value.elems):
                    if index == 0:
                        self.emitter.append(f"if {index} == {gen_index}:")
                    else:
                        self.emitter.append(f"elif {index} == {gen_index}:")
                    with self.emitter.indent():
                        self(elem)(arg)
                self.emitter.append(f"else:")
                with self.emitter.indent():
                    self(value.elems[-1])(arg)
            else:
                self.emitter.append(f"pass")
        return gen
|
||||
|
||||
|
||||
class _StatementCompiler(StatementVisitor, _Compiler):
    """Compiles Statements (assignments and switches) into Python code that
    updates the simulation's signal slots."""
    def __init__(self, state, emitter, *, inputs=None, outputs=None):
        super().__init__(state, emitter)
        self.rhs = _RHSValueCompiler(state, emitter, mode="curr", inputs=inputs)
        self.lhs = _LHSValueCompiler(state, emitter, rhs=self.rhs, outputs=outputs)

    def on_statements(self, stmts):
        for stmt in stmts:
            self(stmt)
        # An empty statement list still needs a syntactically valid body.
        if not stmts:
            self.emitter.append("pass")

    def on_Assign(self, stmt):
        # Truncate the RHS to its own width, sign-extending if signed, before
        # handing it to the LHS generator.
        gen_rhs = f"({(1 << len(stmt.rhs)) - 1} & {self.rhs(stmt.rhs)})"
        if stmt.rhs.shape().signed:
            gen_rhs = f"sign({gen_rhs}, {-1 << (len(stmt.rhs) - 1)})"
        return self.lhs(stmt.lhs)(gen_rhs)

    def on_Switch(self, stmt):
        gen_test = self.emitter.def_var("test",
            f"{(1 << len(stmt.test)) - 1} & {self.rhs(stmt.test)}")
        for index, (patterns, stmts) in enumerate(stmt.cases.items()):
            gen_checks = []
            if not patterns:
                # The default case matches unconditionally.
                gen_checks.append(f"True")
            else:
                for pattern in patterns:
                    if "-" in pattern:
                        # Wildcard pattern: compare only the non-"-" bit positions.
                        mask  = int("".join("0" if b == "-" else "1" for b in pattern), 2)
                        value = int("".join("0" if b == "-" else b   for b in pattern), 2)
                        gen_checks.append(f"{value} == ({mask} & {gen_test})")
                    else:
                        value = int(pattern, 2)
                        gen_checks.append(f"{value} == {gen_test}")
            if index == 0:
                self.emitter.append(f"if {' or '.join(gen_checks)}:")
            else:
                self.emitter.append(f"elif {' or '.join(gen_checks)}:")
            with self.emitter.indent():
                self(stmts)

    # Formal properties are not simulated by this backend.
    def on_Assert(self, stmt):
        raise NotImplementedError # :nocov:

    def on_Assume(self, stmt):
        raise NotImplementedError # :nocov:

    def on_Cover(self, stmt):
        raise NotImplementedError # :nocov:

    @classmethod
    def compile(cls, state, stmt):
        """Return Python source executing *stmt*: load every driven signal's
        pending value into a local, run the statement, store the locals back."""
        output_indexes = [state.get_signal(signal) for signal in stmt._lhs_signals()]
        emitter = _PythonEmitter()
        for signal_index in output_indexes:
            emitter.append(f"next_{signal_index} = slots[{signal_index}].next")
        compiler = cls(state, emitter)
        compiler(stmt)
        for signal_index in output_indexes:
            emitter.append(f"slots[{signal_index}].set(next_{signal_index})")
        return emitter.flush()
|
||||
|
||||
|
||||
class _FragmentCompiler:
|
||||
def __init__(self, state):
|
||||
self.state = state
|
||||
|
||||
def __call__(self, fragment):
|
||||
processes = set()
|
||||
|
||||
for domain_name, domain_signals in fragment.drivers.items():
|
||||
domain_stmts = LHSGroupFilter(domain_signals)(fragment.statements)
|
||||
domain_process = PyRTLProcess(is_comb=domain_name is None)
|
||||
|
||||
emitter = _PythonEmitter()
|
||||
emitter.append(f"def run():")
|
||||
emitter._level += 1
|
||||
|
||||
if domain_name is None:
|
||||
for signal in domain_signals:
|
||||
signal_index = self.state.get_signal(signal)
|
||||
emitter.append(f"next_{signal_index} = {signal.reset}")
|
||||
|
||||
inputs = SignalSet()
|
||||
_StatementCompiler(self.state, emitter, inputs=inputs)(domain_stmts)
|
||||
|
||||
for input in inputs:
|
||||
self.state.add_trigger(domain_process, input)
|
||||
|
||||
else:
|
||||
domain = fragment.domains[domain_name]
|
||||
clk_trigger = 1 if domain.clk_edge == "pos" else 0
|
||||
self.state.add_trigger(domain_process, domain.clk, trigger=clk_trigger)
|
||||
if domain.rst is not None and domain.async_reset:
|
||||
rst_trigger = 1
|
||||
self.state.add_trigger(domain_process, domain.rst, trigger=rst_trigger)
|
||||
|
||||
for signal in domain_signals:
|
||||
signal_index = self.state.get_signal(signal)
|
||||
emitter.append(f"next_{signal_index} = slots[{signal_index}].next")
|
||||
|
||||
_StatementCompiler(self.state, emitter)(domain_stmts)
|
||||
|
||||
for signal in domain_signals:
|
||||
signal_index = self.state.get_signal(signal)
|
||||
emitter.append(f"slots[{signal_index}].set(next_{signal_index})")
|
||||
|
||||
# There shouldn't be any exceptions raised by the generated code, but if there are
|
||||
# (almost certainly due to a bug in the code generator), use this environment variable
|
||||
# to make backtraces useful.
|
||||
code = emitter.flush()
|
||||
if os.getenv("NMIGEN_pysim_dump"):
|
||||
file = tempfile.NamedTemporaryFile("w", prefix="nmigen_pysim_", delete=False)
|
||||
file.write(code)
|
||||
filename = file.name
|
||||
else:
|
||||
filename = "<string>"
|
||||
|
||||
exec_locals = {"slots": self.state.slots, **_ValueCompiler.helpers}
|
||||
exec(compile(code, filename, "exec"), exec_locals)
|
||||
domain_process.run = exec_locals["run"]
|
||||
|
||||
processes.add(domain_process)
|
||||
|
||||
for subfragment_index, (subfragment, subfragment_name) in enumerate(fragment.subfragments):
|
||||
if subfragment_name is None:
|
||||
subfragment_name = "U${}".format(subfragment_index)
|
||||
processes.update(self(subfragment))
|
||||
|
||||
return processes
|
||||
|
|
@ -1,206 +1,7 @@
|
|||
import inspect
|
||||
|
||||
from .._utils import deprecated
|
||||
from ..hdl.cd import *
|
||||
from ..hdl.ir import *
|
||||
from ._base import BaseEngine
|
||||
from amaranth.sim.core import *
|
||||
from amaranth.sim.core import __all__
|
||||
|
||||
|
||||
__all__ = ["Settle", "Delay", "Tick", "Passive", "Active", "Simulator"]
|
||||
|
||||
|
||||
class Command:
|
||||
pass
|
||||
|
||||
|
||||
class Settle(Command):
|
||||
def __repr__(self):
|
||||
return "(settle)"
|
||||
|
||||
|
||||
class Delay(Command):
|
||||
def __init__(self, interval=None):
|
||||
self.interval = None if interval is None else float(interval)
|
||||
|
||||
def __repr__(self):
|
||||
if self.interval is None:
|
||||
return "(delay ε)"
|
||||
else:
|
||||
return "(delay {:.3}us)".format(self.interval * 1e6)
|
||||
|
||||
|
||||
class Tick(Command):
|
||||
def __init__(self, domain="sync"):
|
||||
if not isinstance(domain, (str, ClockDomain)):
|
||||
raise TypeError("Domain must be a string or a ClockDomain instance, not {!r}"
|
||||
.format(domain))
|
||||
assert domain != "comb"
|
||||
self.domain = domain
|
||||
|
||||
def __repr__(self):
|
||||
return "(tick {})".format(self.domain)
|
||||
|
||||
|
||||
class Passive(Command):
|
||||
def __repr__(self):
|
||||
return "(passive)"
|
||||
|
||||
|
||||
class Active(Command):
|
||||
def __repr__(self):
|
||||
return "(active)"
|
||||
|
||||
|
||||
class Simulator:
|
||||
def __init__(self, fragment, *, engine="pysim"):
|
||||
if isinstance(engine, type) and issubclass(engine, BaseEngine):
|
||||
pass
|
||||
elif engine == "pysim":
|
||||
from .pysim import PySimEngine
|
||||
engine = PySimEngine
|
||||
else:
|
||||
raise TypeError("Value '{!r}' is not a simulation engine class or "
|
||||
"a simulation engine name"
|
||||
.format(engine))
|
||||
|
||||
self._fragment = Fragment.get(fragment, platform=None).prepare()
|
||||
self._engine = engine(self._fragment)
|
||||
self._clocked = set()
|
||||
|
||||
def _check_process(self, process):
|
||||
if not (inspect.isgeneratorfunction(process) or inspect.iscoroutinefunction(process)):
|
||||
raise TypeError("Cannot add a process {!r} because it is not a generator function"
|
||||
.format(process))
|
||||
return process
|
||||
|
||||
def add_process(self, process):
|
||||
process = self._check_process(process)
|
||||
def wrapper():
|
||||
# Only start a bench process after comb settling, so that the reset values are correct.
|
||||
yield Settle()
|
||||
yield from process()
|
||||
self._engine.add_coroutine_process(wrapper, default_cmd=None)
|
||||
|
||||
def add_sync_process(self, process, *, domain="sync"):
|
||||
process = self._check_process(process)
|
||||
def wrapper():
|
||||
# Only start a sync process after the first clock edge (or reset edge, if the domain
|
||||
# uses an asynchronous reset). This matches the behavior of synchronous FFs.
|
||||
yield Tick(domain)
|
||||
yield from process()
|
||||
self._engine.add_coroutine_process(wrapper, default_cmd=Tick(domain))
|
||||
|
||||
def add_clock(self, period, *, phase=None, domain="sync", if_exists=False):
|
||||
"""Add a clock process.
|
||||
|
||||
Adds a process that drives the clock signal of ``domain`` at a 50% duty cycle.
|
||||
|
||||
Arguments
|
||||
---------
|
||||
period : float
|
||||
Clock period. The process will toggle the ``domain`` clock signal every ``period / 2``
|
||||
seconds.
|
||||
phase : None or float
|
||||
Clock phase. The process will wait ``phase`` seconds before the first clock transition.
|
||||
If not specified, defaults to ``period / 2``.
|
||||
domain : str or ClockDomain
|
||||
Driven clock domain. If specified as a string, the domain with that name is looked up
|
||||
in the root fragment of the simulation.
|
||||
if_exists : bool
|
||||
If ``False`` (the default), raise an error if the driven domain is specified as
|
||||
a string and the root fragment does not have such a domain. If ``True``, do nothing
|
||||
in this case.
|
||||
"""
|
||||
if isinstance(domain, ClockDomain):
|
||||
pass
|
||||
elif domain in self._fragment.domains:
|
||||
domain = self._fragment.domains[domain]
|
||||
elif if_exists:
|
||||
return
|
||||
else:
|
||||
raise ValueError("Domain {!r} is not present in simulation"
|
||||
.format(domain))
|
||||
if domain in self._clocked:
|
||||
raise ValueError("Domain {!r} already has a clock driving it"
|
||||
.format(domain.name))
|
||||
|
||||
if phase is None:
|
||||
# By default, delay the first edge by half period. This causes any synchronous activity
|
||||
# to happen at a non-zero time, distinguishing it from the reset values in the waveform
|
||||
# viewer.
|
||||
phase = period / 2
|
||||
self._engine.add_clock_process(domain.clk, phase=phase, period=period)
|
||||
self._clocked.add(domain)
|
||||
|
||||
def reset(self):
|
||||
"""Reset the simulation.
|
||||
|
||||
Assign the reset value to every signal in the simulation, and restart every user process.
|
||||
"""
|
||||
self._engine.reset()
|
||||
|
||||
# TODO(nmigen-0.4): replace with _real_step
|
||||
@deprecated("instead of `sim.step()`, use `sim.advance()`")
|
||||
def step(self):
|
||||
return self.advance()
|
||||
|
||||
def advance(self):
|
||||
"""Advance the simulation.
|
||||
|
||||
Run every process and commit changes until a fixed point is reached, then advance time
|
||||
to the closest deadline (if any). If there is an unstable combinatorial loop,
|
||||
this function will never return.
|
||||
|
||||
Returns ``True`` if there are any active processes, ``False`` otherwise.
|
||||
"""
|
||||
return self._engine.advance()
|
||||
|
||||
def run(self):
|
||||
"""Run the simulation while any processes are active.
|
||||
|
||||
Processes added with :meth:`add_process` and :meth:`add_sync_process` are initially active,
|
||||
and may change their status using the ``yield Passive()`` and ``yield Active()`` commands.
|
||||
Processes compiled from HDL and added with :meth:`add_clock` are always passive.
|
||||
"""
|
||||
while self.advance():
|
||||
pass
|
||||
|
||||
def run_until(self, deadline, *, run_passive=False):
|
||||
"""Run the simulation until it advances to ``deadline``.
|
||||
|
||||
If ``run_passive`` is ``False``, the simulation also stops when there are no active
|
||||
processes, similar to :meth:`run`. Otherwise, the simulation will stop only after it
|
||||
advances to or past ``deadline``.
|
||||
|
||||
If the simulation stops advancing, this function will never return.
|
||||
"""
|
||||
assert self._engine.now <= deadline
|
||||
while (self.advance() or run_passive) and self._engine.now < deadline:
|
||||
pass
|
||||
|
||||
def write_vcd(self, vcd_file, gtkw_file=None, *, traces=()):
|
||||
"""Write waveforms to a Value Change Dump file, optionally populating a GTKWave save file.
|
||||
|
||||
This method returns a context manager. It can be used as: ::
|
||||
|
||||
sim = Simulator(frag)
|
||||
sim.add_clock(1e-6)
|
||||
with sim.write_vcd("dump.vcd", "dump.gtkw"):
|
||||
sim.run_until(1e-3)
|
||||
|
||||
Arguments
|
||||
---------
|
||||
vcd_file : str or file-like object
|
||||
Verilog Value Change Dump file or filename.
|
||||
gtkw_file : str or file-like object
|
||||
GTKWave save file or filename.
|
||||
traces : iterable of Signal
|
||||
Signals to display traces for.
|
||||
"""
|
||||
if self._engine.now != 0.0:
|
||||
for file in (vcd_file, gtkw_file):
|
||||
if hasattr(file, "close"):
|
||||
file.close()
|
||||
raise ValueError("Cannot start writing waveforms after advancing simulation time")
|
||||
|
||||
return self._engine.write_vcd(vcd_file=vcd_file, gtkw_file=gtkw_file, traces=traces)
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.sim.core, use amaranth.sim.core",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,336 +1,7 @@
|
|||
from contextlib import contextmanager
|
||||
import itertools
|
||||
from vcd import VCDWriter
|
||||
from vcd.gtkw import GTKWSave
|
||||
from amaranth.sim.pysim import *
|
||||
from amaranth.sim.pysim import __all__
|
||||
|
||||
from ..hdl import *
|
||||
from ..hdl.ast import SignalDict
|
||||
from ._base import *
|
||||
from ._pyrtl import _FragmentCompiler
|
||||
from ._pycoro import PyCoroProcess
|
||||
from ._pyclock import PyClockProcess
|
||||
|
||||
|
||||
__all__ = ["PySimEngine"]
|
||||
|
||||
|
||||
class _NameExtractor:
|
||||
def __init__(self):
|
||||
self.names = SignalDict()
|
||||
|
||||
def __call__(self, fragment, *, hierarchy=("top",)):
|
||||
def add_signal_name(signal):
|
||||
hierarchical_signal_name = (*hierarchy, signal.name)
|
||||
if signal not in self.names:
|
||||
self.names[signal] = {hierarchical_signal_name}
|
||||
else:
|
||||
self.names[signal].add(hierarchical_signal_name)
|
||||
|
||||
for domain_name, domain_signals in fragment.drivers.items():
|
||||
if domain_name is not None:
|
||||
domain = fragment.domains[domain_name]
|
||||
add_signal_name(domain.clk)
|
||||
if domain.rst is not None:
|
||||
add_signal_name(domain.rst)
|
||||
|
||||
for statement in fragment.statements:
|
||||
for signal in statement._lhs_signals() | statement._rhs_signals():
|
||||
if not isinstance(signal, (ClockSignal, ResetSignal)):
|
||||
add_signal_name(signal)
|
||||
|
||||
for subfragment_index, (subfragment, subfragment_name) in enumerate(fragment.subfragments):
|
||||
if subfragment_name is None:
|
||||
subfragment_name = "U${}".format(subfragment_index)
|
||||
self(subfragment, hierarchy=(*hierarchy, subfragment_name))
|
||||
|
||||
return self.names
|
||||
|
||||
|
||||
class _VCDWriter:
|
||||
@staticmethod
|
||||
def timestamp_to_vcd(timestamp):
|
||||
return timestamp * (10 ** 10) # 1/(100 ps)
|
||||
|
||||
@staticmethod
|
||||
def decode_to_vcd(signal, value):
|
||||
return signal.decoder(value).expandtabs().replace(" ", "_")
|
||||
|
||||
def __init__(self, fragment, *, vcd_file, gtkw_file=None, traces=()):
|
||||
if isinstance(vcd_file, str):
|
||||
vcd_file = open(vcd_file, "wt")
|
||||
if isinstance(gtkw_file, str):
|
||||
gtkw_file = open(gtkw_file, "wt")
|
||||
|
||||
self.vcd_vars = SignalDict()
|
||||
self.vcd_file = vcd_file
|
||||
self.vcd_writer = vcd_file and VCDWriter(self.vcd_file,
|
||||
timescale="100 ps", comment="Generated by nMigen")
|
||||
|
||||
self.gtkw_names = SignalDict()
|
||||
self.gtkw_file = gtkw_file
|
||||
self.gtkw_save = gtkw_file and GTKWSave(self.gtkw_file)
|
||||
|
||||
self.traces = []
|
||||
|
||||
signal_names = _NameExtractor()(fragment)
|
||||
|
||||
trace_names = SignalDict()
|
||||
for trace in traces:
|
||||
if trace not in signal_names:
|
||||
trace_names[trace] = {("top", trace.name)}
|
||||
self.traces.append(trace)
|
||||
|
||||
if self.vcd_writer is None:
|
||||
return
|
||||
|
||||
for signal, names in itertools.chain(signal_names.items(), trace_names.items()):
|
||||
if signal.decoder:
|
||||
var_type = "string"
|
||||
var_size = 1
|
||||
var_init = self.decode_to_vcd(signal, signal.reset)
|
||||
else:
|
||||
var_type = "wire"
|
||||
var_size = signal.width
|
||||
var_init = signal.reset
|
||||
|
||||
for (*var_scope, var_name) in names:
|
||||
suffix = None
|
||||
while True:
|
||||
try:
|
||||
if suffix is None:
|
||||
var_name_suffix = var_name
|
||||
else:
|
||||
var_name_suffix = "{}${}".format(var_name, suffix)
|
||||
if signal not in self.vcd_vars:
|
||||
vcd_var = self.vcd_writer.register_var(
|
||||
scope=var_scope, name=var_name_suffix,
|
||||
var_type=var_type, size=var_size, init=var_init)
|
||||
self.vcd_vars[signal] = vcd_var
|
||||
else:
|
||||
self.vcd_writer.register_alias(
|
||||
scope=var_scope, name=var_name_suffix,
|
||||
var=self.vcd_vars[signal])
|
||||
break
|
||||
except KeyError:
|
||||
suffix = (suffix or 0) + 1
|
||||
|
||||
if signal not in self.gtkw_names:
|
||||
self.gtkw_names[signal] = (*var_scope, var_name_suffix)
|
||||
|
||||
def update(self, timestamp, signal, value):
|
||||
vcd_var = self.vcd_vars.get(signal)
|
||||
if vcd_var is None:
|
||||
return
|
||||
|
||||
vcd_timestamp = self.timestamp_to_vcd(timestamp)
|
||||
if signal.decoder:
|
||||
var_value = self.decode_to_vcd(signal, value)
|
||||
else:
|
||||
var_value = value
|
||||
self.vcd_writer.change(vcd_var, vcd_timestamp, var_value)
|
||||
|
||||
def close(self, timestamp):
|
||||
if self.vcd_writer is not None:
|
||||
self.vcd_writer.close(self.timestamp_to_vcd(timestamp))
|
||||
|
||||
if self.gtkw_save is not None:
|
||||
self.gtkw_save.dumpfile(self.vcd_file.name)
|
||||
self.gtkw_save.dumpfile_size(self.vcd_file.tell())
|
||||
|
||||
self.gtkw_save.treeopen("top")
|
||||
for signal in self.traces:
|
||||
if len(signal) > 1 and not signal.decoder:
|
||||
suffix = "[{}:0]".format(len(signal) - 1)
|
||||
else:
|
||||
suffix = ""
|
||||
self.gtkw_save.trace(".".join(self.gtkw_names[signal]) + suffix)
|
||||
|
||||
if self.vcd_file is not None:
|
||||
self.vcd_file.close()
|
||||
if self.gtkw_file is not None:
|
||||
self.gtkw_file.close()
|
||||
|
||||
|
||||
class _Timeline:
|
||||
def __init__(self):
|
||||
self.now = 0.0
|
||||
self.deadlines = dict()
|
||||
|
||||
def reset(self):
|
||||
self.now = 0.0
|
||||
self.deadlines.clear()
|
||||
|
||||
def at(self, run_at, process):
|
||||
assert process not in self.deadlines
|
||||
self.deadlines[process] = run_at
|
||||
|
||||
def delay(self, delay_by, process):
|
||||
if delay_by is None:
|
||||
run_at = self.now
|
||||
else:
|
||||
run_at = self.now + delay_by
|
||||
self.at(run_at, process)
|
||||
|
||||
def advance(self):
|
||||
nearest_processes = set()
|
||||
nearest_deadline = None
|
||||
for process, deadline in self.deadlines.items():
|
||||
if deadline is None:
|
||||
if nearest_deadline is not None:
|
||||
nearest_processes.clear()
|
||||
nearest_processes.add(process)
|
||||
nearest_deadline = self.now
|
||||
break
|
||||
elif nearest_deadline is None or deadline <= nearest_deadline:
|
||||
assert deadline >= self.now
|
||||
if nearest_deadline is not None and deadline < nearest_deadline:
|
||||
nearest_processes.clear()
|
||||
nearest_processes.add(process)
|
||||
nearest_deadline = deadline
|
||||
|
||||
if not nearest_processes:
|
||||
return False
|
||||
|
||||
for process in nearest_processes:
|
||||
process.runnable = True
|
||||
del self.deadlines[process]
|
||||
self.now = nearest_deadline
|
||||
|
||||
return True
|
||||
|
||||
|
||||
class _PySignalState(BaseSignalState):
|
||||
__slots__ = ("signal", "curr", "next", "waiters", "pending")
|
||||
|
||||
def __init__(self, signal, pending):
|
||||
self.signal = signal
|
||||
self.pending = pending
|
||||
self.waiters = dict()
|
||||
self.curr = self.next = signal.reset
|
||||
|
||||
def set(self, value):
|
||||
if self.next == value:
|
||||
return
|
||||
self.next = value
|
||||
self.pending.add(self)
|
||||
|
||||
def commit(self):
|
||||
if self.curr == self.next:
|
||||
return False
|
||||
self.curr = self.next
|
||||
|
||||
awoken_any = False
|
||||
for process, trigger in self.waiters.items():
|
||||
if trigger is None or trigger == self.curr:
|
||||
process.runnable = awoken_any = True
|
||||
return awoken_any
|
||||
|
||||
|
||||
class _PySimulation(BaseSimulation):
|
||||
def __init__(self):
|
||||
self.timeline = _Timeline()
|
||||
self.signals = SignalDict()
|
||||
self.slots = []
|
||||
self.pending = set()
|
||||
|
||||
def reset(self):
|
||||
self.timeline.reset()
|
||||
for signal, index in self.signals.items():
|
||||
self.slots[index].curr = self.slots[index].next = signal.reset
|
||||
self.pending.clear()
|
||||
|
||||
def get_signal(self, signal):
|
||||
try:
|
||||
return self.signals[signal]
|
||||
except KeyError:
|
||||
index = len(self.slots)
|
||||
self.slots.append(_PySignalState(signal, self.pending))
|
||||
self.signals[signal] = index
|
||||
return index
|
||||
|
||||
def add_trigger(self, process, signal, *, trigger=None):
|
||||
index = self.get_signal(signal)
|
||||
assert (process not in self.slots[index].waiters or
|
||||
self.slots[index].waiters[process] == trigger)
|
||||
self.slots[index].waiters[process] = trigger
|
||||
|
||||
def remove_trigger(self, process, signal):
|
||||
index = self.get_signal(signal)
|
||||
assert process in self.slots[index].waiters
|
||||
del self.slots[index].waiters[process]
|
||||
|
||||
def wait_interval(self, process, interval):
|
||||
self.timeline.delay(interval, process)
|
||||
|
||||
def commit(self, changed=None):
|
||||
converged = True
|
||||
for signal_state in self.pending:
|
||||
if signal_state.commit():
|
||||
converged = False
|
||||
if changed is not None:
|
||||
changed.update(self.pending)
|
||||
self.pending.clear()
|
||||
return converged
|
||||
|
||||
|
||||
class PySimEngine(BaseEngine):
|
||||
def __init__(self, fragment):
|
||||
self._state = _PySimulation()
|
||||
self._timeline = self._state.timeline
|
||||
|
||||
self._fragment = fragment
|
||||
self._processes = _FragmentCompiler(self._state)(self._fragment)
|
||||
self._vcd_writers = []
|
||||
|
||||
def add_coroutine_process(self, process, *, default_cmd):
|
||||
self._processes.add(PyCoroProcess(self._state, self._fragment.domains, process,
|
||||
default_cmd=default_cmd))
|
||||
|
||||
def add_clock_process(self, clock, *, phase, period):
|
||||
self._processes.add(PyClockProcess(self._state, clock,
|
||||
phase=phase, period=period))
|
||||
|
||||
def reset(self):
|
||||
self._state.reset()
|
||||
for process in self._processes:
|
||||
process.reset()
|
||||
|
||||
def _step(self):
|
||||
changed = set() if self._vcd_writers else None
|
||||
|
||||
# Performs the two phases of a delta cycle in a loop:
|
||||
converged = False
|
||||
while not converged:
|
||||
# 1. eval: run and suspend every non-waiting process once, queueing signal changes
|
||||
for process in self._processes:
|
||||
if process.runnable:
|
||||
process.runnable = False
|
||||
process.run()
|
||||
|
||||
# 2. commit: apply every queued signal change, waking up any waiting processes
|
||||
converged = self._state.commit(changed)
|
||||
|
||||
for vcd_writer in self._vcd_writers:
|
||||
for signal_state in changed:
|
||||
vcd_writer.update(self._timeline.now,
|
||||
signal_state.signal, signal_state.curr)
|
||||
|
||||
def advance(self):
|
||||
self._step()
|
||||
self._timeline.advance()
|
||||
return any(not process.passive for process in self._processes)
|
||||
|
||||
@property
|
||||
def now(self):
|
||||
return self._timeline.now
|
||||
|
||||
@contextmanager
|
||||
def write_vcd(self, *, vcd_file, gtkw_file, traces):
|
||||
vcd_writer = _VCDWriter(self._fragment,
|
||||
vcd_file=vcd_file, gtkw_file=gtkw_file, traces=traces)
|
||||
try:
|
||||
self._vcd_writers.append(vcd_writer)
|
||||
yield
|
||||
finally:
|
||||
vcd_writer.close(self._timeline.now)
|
||||
self._vcd_writers.remove(vcd_writer)
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.sim.pysim, use amaranth.sim.pysim",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1 +1,6 @@
|
|||
# TODO(nmigen-0.4): remove the entire package
|
||||
from amaranth.test import *
|
||||
|
||||
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.test, use amaranth.test",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,84 +1,7 @@
|
|||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import textwrap
|
||||
import traceback
|
||||
import unittest
|
||||
from amaranth.test.utils import *
|
||||
from amaranth.test.utils import __all__
|
||||
|
||||
|
||||
import warnings
|
||||
|
||||
from ..hdl.ast import *
|
||||
from ..hdl.ir import *
|
||||
from ..back import rtlil
|
||||
from .._toolchain import require_tool
|
||||
|
||||
|
||||
warnings.warn("nmigen.test.utils is an internal utility module that has several design flaws "
|
||||
"and was never intended as a public API; it will be removed in nmigen 0.4. "
|
||||
"if you are using FHDLTestCase, include its implementation in your codebase. "
|
||||
"see also nmigen/nmigen#487",
|
||||
warnings.warn("instead of nmigen.test.utils, use amaranth.test.utils",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
||||
|
||||
__all__ = ["FHDLTestCase"]
|
||||
|
||||
|
||||
class FHDLTestCase(unittest.TestCase):
|
||||
def assertRepr(self, obj, repr_str):
|
||||
if isinstance(obj, list):
|
||||
obj = Statement.cast(obj)
|
||||
def prepare_repr(repr_str):
|
||||
repr_str = re.sub(r"\s+", " ", repr_str)
|
||||
repr_str = re.sub(r"\( (?=\()", "(", repr_str)
|
||||
repr_str = re.sub(r"\) (?=\))", ")", repr_str)
|
||||
return repr_str.strip()
|
||||
self.assertEqual(prepare_repr(repr(obj)), prepare_repr(repr_str))
|
||||
|
||||
def assertFormal(self, spec, mode="bmc", depth=1):
|
||||
caller, *_ = traceback.extract_stack(limit=2)
|
||||
spec_root, _ = os.path.splitext(caller.filename)
|
||||
spec_dir = os.path.dirname(spec_root)
|
||||
spec_name = "{}_{}".format(
|
||||
os.path.basename(spec_root).replace("test_", "spec_"),
|
||||
caller.name.replace("test_", "")
|
||||
)
|
||||
|
||||
# The sby -f switch seems not fully functional when sby is reading from stdin.
|
||||
if os.path.exists(os.path.join(spec_dir, spec_name)):
|
||||
shutil.rmtree(os.path.join(spec_dir, spec_name))
|
||||
|
||||
if mode == "hybrid":
|
||||
# A mix of BMC and k-induction, as per personal communication with Claire Wolf.
|
||||
script = "setattr -unset init w:* a:nmigen.sample_reg %d"
|
||||
mode = "bmc"
|
||||
else:
|
||||
script = ""
|
||||
|
||||
config = textwrap.dedent("""\
|
||||
[options]
|
||||
mode {mode}
|
||||
depth {depth}
|
||||
wait on
|
||||
|
||||
[engines]
|
||||
smtbmc
|
||||
|
||||
[script]
|
||||
read_ilang top.il
|
||||
prep
|
||||
{script}
|
||||
|
||||
[file top.il]
|
||||
{rtlil}
|
||||
""").format(
|
||||
mode=mode,
|
||||
depth=depth,
|
||||
script=script,
|
||||
rtlil=rtlil.convert(Fragment.get(spec, platform="formal"))
|
||||
)
|
||||
with subprocess.Popen([require_tool("sby"), "-f", "-d", spec_name], cwd=spec_dir,
|
||||
universal_newlines=True,
|
||||
stdin=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
|
||||
stdout, stderr = proc.communicate(config)
|
||||
if proc.returncode != 0:
|
||||
self.fail("Formal verification failed:\n" + stdout)
|
||||
|
|
|
|||
|
|
@ -1,55 +1,7 @@
|
|||
import sys
|
||||
from opcode import opname
|
||||
from amaranth.tracer import *
|
||||
from amaranth.tracer import __all__
|
||||
|
||||
|
||||
__all__ = ["NameNotFound", "get_var_name", "get_src_loc"]
|
||||
|
||||
|
||||
class NameNotFound(Exception):
|
||||
pass
|
||||
|
||||
|
||||
_raise_exception = object()
|
||||
|
||||
|
||||
def get_var_name(depth=2, default=_raise_exception):
|
||||
frame = sys._getframe(depth)
|
||||
code = frame.f_code
|
||||
call_index = frame.f_lasti
|
||||
while True:
|
||||
call_opc = opname[code.co_code[call_index]]
|
||||
if call_opc in ("EXTENDED_ARG",):
|
||||
call_index += 2
|
||||
else:
|
||||
break
|
||||
if call_opc not in ("CALL_FUNCTION", "CALL_FUNCTION_KW", "CALL_FUNCTION_EX", "CALL_METHOD"):
|
||||
return None
|
||||
|
||||
index = call_index + 2
|
||||
while True:
|
||||
opc = opname[code.co_code[index]]
|
||||
if opc in ("STORE_NAME", "STORE_ATTR"):
|
||||
name_index = int(code.co_code[index + 1])
|
||||
return code.co_names[name_index]
|
||||
elif opc == "STORE_FAST":
|
||||
name_index = int(code.co_code[index + 1])
|
||||
return code.co_varnames[name_index]
|
||||
elif opc == "STORE_DEREF":
|
||||
name_index = int(code.co_code[index + 1])
|
||||
return code.co_cellvars[name_index]
|
||||
elif opc in ("LOAD_GLOBAL", "LOAD_NAME", "LOAD_ATTR", "LOAD_FAST", "LOAD_DEREF",
|
||||
"DUP_TOP", "BUILD_LIST"):
|
||||
index += 2
|
||||
else:
|
||||
if default is _raise_exception:
|
||||
raise NameNotFound
|
||||
else:
|
||||
return default
|
||||
|
||||
|
||||
def get_src_loc(src_loc_at=0):
|
||||
# n-th frame: get_src_loc()
|
||||
# n-1th frame: caller of get_src_loc() (usually constructor)
|
||||
# n-2th frame: caller of caller (usually user code)
|
||||
frame = sys._getframe(2 + src_loc_at)
|
||||
return (frame.f_code.co_filename, frame.f_lineno)
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.tracer, use amaranth.tracer",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
|
|
@ -1,21 +1,7 @@
|
|||
__all__ = ["log2_int", "bits_for"]
|
||||
from amaranth.utils import *
|
||||
from amaranth.utils import __all__
|
||||
|
||||
|
||||
def log2_int(n, need_pow2=True):
|
||||
if n == 0:
|
||||
return 0
|
||||
r = (n - 1).bit_length()
|
||||
if need_pow2 and (1 << r) != n:
|
||||
raise ValueError("{} is not a power of 2".format(n))
|
||||
return r
|
||||
|
||||
|
||||
def bits_for(n, require_sign_bit=False):
|
||||
if n > 0:
|
||||
r = log2_int(n + 1, False)
|
||||
else:
|
||||
require_sign_bit = True
|
||||
r = log2_int(-n, False)
|
||||
if require_sign_bit:
|
||||
r += 1
|
||||
return r
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.utils, use amaranth.utils",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
6
nmigen/vendor/__init__.py
vendored
6
nmigen/vendor/__init__.py
vendored
|
|
@ -0,0 +1,6 @@
|
|||
from amaranth.vendor import *
|
||||
|
||||
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.vendor, use amaranth.vendor",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
574
nmigen/vendor/intel.py
vendored
574
nmigen/vendor/intel.py
vendored
|
|
@ -1,571 +1,7 @@
|
|||
from abc import abstractproperty
|
||||
|
||||
from ..hdl import *
|
||||
from ..build import *
|
||||
from amaranth.vendor.intel import *
|
||||
from amaranth.vendor.intel import __all__
|
||||
|
||||
|
||||
__all__ = ["IntelPlatform"]
|
||||
|
||||
|
||||
class IntelPlatform(TemplatedPlatform):
|
||||
"""
|
||||
Quartus toolchain
|
||||
-----------------
|
||||
|
||||
Required tools:
|
||||
* ``quartus_map``
|
||||
* ``quartus_fit``
|
||||
* ``quartus_asm``
|
||||
* ``quartus_sta``
|
||||
|
||||
The environment is populated by running the script specified in the environment variable
|
||||
``NMIGEN_ENV_Quartus``, if present.
|
||||
|
||||
Available overrides:
|
||||
* ``add_settings``: inserts commands at the end of the QSF file.
|
||||
* ``add_constraints``: inserts commands at the end of the SDC file.
|
||||
* ``nproc``: sets the number of cores used by all tools.
|
||||
* ``quartus_map_opts``: adds extra options for ``quartus_map``.
|
||||
* ``quartus_fit_opts``: adds extra options for ``quartus_fit``.
|
||||
* ``quartus_asm_opts``: adds extra options for ``quartus_asm``.
|
||||
* ``quartus_sta_opts``: adds extra options for ``quartus_sta``.
|
||||
|
||||
Build products:
|
||||
* ``*.rpt``: toolchain reports.
|
||||
* ``{{name}}.sof``: bitstream as SRAM object file.
|
||||
* ``{{name}}.rbf``: bitstream as raw binary file.
|
||||
|
||||
|
||||
Mistral toolchain
|
||||
-----------------
|
||||
|
||||
Required tools:
|
||||
* ``yosys``
|
||||
* ``nextpnr-mistral``
|
||||
|
||||
The environment is populated by running the script specified in the environment variable
|
||||
``NMIGEN_ENV_Mistral``, if present.
|
||||
|
||||
* ``verbose``: enables logging of informational messages to standard error.
|
||||
* ``read_verilog_opts``: adds options for ``read_verilog`` Yosys command.
|
||||
* ``synth_opts``: adds options for ``synth_intel_alm`` Yosys command.
|
||||
* ``script_after_read``: inserts commands after ``read_ilang`` in Yosys script.
|
||||
* ``script_after_synth``: inserts commands after ``synth_intel_alm`` in Yosys script.
|
||||
* ``yosys_opts``: adds extra options for ``yosys``.
|
||||
* ``nextpnr_opts``: adds extra options for ``nextpnr-mistral``.
|
||||
"""
|
||||
|
||||
toolchain = None # selected when creating platform
|
||||
|
||||
device = abstractproperty()
|
||||
package = abstractproperty()
|
||||
speed = abstractproperty()
|
||||
suffix = ""
|
||||
|
||||
# Quartus templates
|
||||
|
||||
quartus_suppressed_warnings = [
|
||||
10264, # All case item expressions in this case statement are onehot
|
||||
10270, # Incomplete Verilog case statement has no default case item
|
||||
10335, # Unrecognized synthesis attribute
|
||||
10763, # Verilog case statement has overlapping case item expressions with non-constant or don't care bits
|
||||
10935, # Verilog casex/casez overlaps with a previous casex/vasez item expression
|
||||
12125, # Using design file which is not specified as a design file for the current project, but contains definitions used in project
|
||||
18236, # Number of processors not specified in QSF
|
||||
292013, # Feature is only available with a valid subscription license
|
||||
]
|
||||
|
||||
quartus_required_tools = [
|
||||
"quartus_map",
|
||||
"quartus_fit",
|
||||
"quartus_asm",
|
||||
"quartus_sta",
|
||||
]
|
||||
|
||||
quartus_file_templates = {
|
||||
**TemplatedPlatform.build_script_templates,
|
||||
"build_{{name}}.sh": r"""
|
||||
# {{autogenerated}}
|
||||
if [ -n "${{platform._toolchain_env_var}}" ]; then
|
||||
QUARTUS_ROOTDIR=$(dirname $(dirname "${{platform._toolchain_env_var}}"))
|
||||
# Quartus' qenv.sh does not work with `set -e`.
|
||||
. "${{platform._toolchain_env_var}}"
|
||||
fi
|
||||
set -e{{verbose("x")}}
|
||||
{{emit_commands("sh")}}
|
||||
""",
|
||||
"{{name}}.v": r"""
|
||||
/* {{autogenerated}} */
|
||||
{{emit_verilog()}}
|
||||
""",
|
||||
"{{name}}.debug.v": r"""
|
||||
/* {{autogenerated}} */
|
||||
{{emit_debug_verilog()}}
|
||||
""",
|
||||
"{{name}}.qsf": r"""
|
||||
# {{autogenerated}}
|
||||
{% if get_override("nproc") -%}
|
||||
set_global_assignment -name NUM_PARALLEL_PROCESSORS {{get_override("nproc")}}
|
||||
{% endif %}
|
||||
|
||||
{% for file in platform.iter_files(".v") -%}
|
||||
set_global_assignment -name VERILOG_FILE {{file|tcl_quote}}
|
||||
{% endfor %}
|
||||
{% for file in platform.iter_files(".sv") -%}
|
||||
set_global_assignment -name SYSTEMVERILOG_FILE {{file|tcl_quote}}
|
||||
{% endfor %}
|
||||
{% for file in platform.iter_files(".vhd", ".vhdl") -%}
|
||||
set_global_assignment -name VHDL_FILE {{file|tcl_quote}}
|
||||
{% endfor %}
|
||||
set_global_assignment -name VERILOG_FILE {{name}}.v
|
||||
set_global_assignment -name TOP_LEVEL_ENTITY {{name}}
|
||||
|
||||
set_global_assignment -name DEVICE {{platform.device}}{{platform.package}}{{platform.speed}}{{platform.suffix}}
|
||||
{% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%}
|
||||
set_location_assignment -to {{port_name|tcl_quote}} PIN_{{pin_name}}
|
||||
{% for key, value in attrs.items() -%}
|
||||
set_instance_assignment -to {{port_name|tcl_quote}} -name {{key}} {{value|tcl_quote}}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
|
||||
set_global_assignment -name GENERATE_RBF_FILE ON
|
||||
|
||||
{{get_override("add_settings")|default("# (add_settings placeholder)")}}
|
||||
""",
|
||||
"{{name}}.sdc": r"""
|
||||
{% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%}
|
||||
{% if port_signal is not none -%}
|
||||
create_clock -name {{port_signal.name|tcl_quote}} -period {{1000000000/frequency}} [get_ports {{port_signal.name|tcl_quote}}]
|
||||
{% else -%}
|
||||
create_clock -name {{net_signal.name|tcl_quote}} -period {{1000000000/frequency}} [get_nets {{net_signal|hierarchy("|")|tcl_quote}}]
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{{get_override("add_constraints")|default("# (add_constraints placeholder)")}}
|
||||
""",
|
||||
"{{name}}.srf": r"""
|
||||
{% for warning in platform.quartus_suppressed_warnings %}
|
||||
{ "" "" "" "{{name}}.v" { } { } 0 {{warning}} "" 0 0 "Design Software" 0 -1 0 ""}
|
||||
{% endfor %}
|
||||
""",
|
||||
}
|
||||
quartus_command_templates = [
|
||||
r"""
|
||||
{{invoke_tool("quartus_map")}}
|
||||
{{get_override("quartus_map_opts")|options}}
|
||||
--rev={{name}} {{name}}
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("quartus_fit")}}
|
||||
{{get_override("quartus_fit_opts")|options}}
|
||||
--rev={{name}} {{name}}
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("quartus_asm")}}
|
||||
{{get_override("quartus_asm_opts")|options}}
|
||||
--rev={{name}} {{name}}
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("quartus_sta")}}
|
||||
{{get_override("quartus_sta_opts")|options}}
|
||||
--rev={{name}} {{name}}
|
||||
""",
|
||||
]
|
||||
|
||||
|
||||
# Mistral templates
|
||||
|
||||
mistral_required_tools = [
|
||||
"yosys",
|
||||
"nextpnr-mistral"
|
||||
]
|
||||
mistral_file_templates = {
|
||||
**TemplatedPlatform.build_script_templates,
|
||||
"{{name}}.il": r"""
|
||||
# {{autogenerated}}
|
||||
{{emit_rtlil()}}
|
||||
""",
|
||||
"{{name}}.debug.v": r"""
|
||||
/* {{autogenerated}} */
|
||||
{{emit_debug_verilog()}}
|
||||
""",
|
||||
"{{name}}.ys": r"""
|
||||
# {{autogenerated}}
|
||||
{% for file in platform.iter_files(".v") -%}
|
||||
read_verilog {{get_override("read_verilog_opts")|options}} {{file}}
|
||||
{% endfor %}
|
||||
{% for file in platform.iter_files(".sv") -%}
|
||||
read_verilog -sv {{get_override("read_verilog_opts")|options}} {{file}}
|
||||
{% endfor %}
|
||||
{% for file in platform.iter_files(".il") -%}
|
||||
read_ilang {{file}}
|
||||
{% endfor %}
|
||||
read_ilang {{name}}.il
|
||||
delete w:$verilog_initial_trigger
|
||||
{{get_override("script_after_read")|default("# (script_after_read placeholder)")}}
|
||||
synth_intel_alm {{get_override("synth_opts")|options}} -top {{name}}
|
||||
{{get_override("script_after_synth")|default("# (script_after_synth placeholder)")}}
|
||||
write_json {{name}}.json
|
||||
""",
|
||||
"{{name}}.qsf": r"""
|
||||
# {{autogenerated}}
|
||||
{% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%}
|
||||
set_location_assignment -to {{port_name|tcl_quote}} PIN_{{pin_name}}
|
||||
{% for key, value in attrs.items() -%}
|
||||
set_instance_assignment -to {{port_name|tcl_quote}} -name {{key}} {{value|tcl_quote}}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
""",
|
||||
|
||||
}
|
||||
mistral_command_templates = [
|
||||
r"""
|
||||
{{invoke_tool("yosys")}}
|
||||
{{quiet("-q")}}
|
||||
{{get_override("yosys_opts")|options}}
|
||||
-l {{name}}.rpt
|
||||
{{name}}.ys
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("nextpnr-mistral")}}
|
||||
{{quiet("--quiet")}}
|
||||
{{get_override("nextpnr_opts")|options}}
|
||||
--log {{name}}.tim
|
||||
--device {{platform.device}}{{platform.package}}{{platform.speed}}{{platform.suffix}}
|
||||
--json {{name}}.json
|
||||
--qsf {{name}}.qsf
|
||||
--rbf {{name}}.rbf
|
||||
"""
|
||||
]
|
||||
|
||||
# Common logic
|
||||
|
||||
def __init__(self, *, toolchain="Quartus"):
|
||||
super().__init__()
|
||||
|
||||
assert toolchain in ("Quartus", "Mistral")
|
||||
self.toolchain = toolchain
|
||||
|
||||
@property
|
||||
def required_tools(self):
|
||||
if self.toolchain == "Quartus":
|
||||
return self.quartus_required_tools
|
||||
if self.toolchain == "Mistral":
|
||||
return self.mistral_required_tools
|
||||
assert False
|
||||
|
||||
@property
|
||||
def file_templates(self):
|
||||
if self.toolchain == "Quartus":
|
||||
return self.quartus_file_templates
|
||||
if self.toolchain == "Mistral":
|
||||
return self.mistral_file_templates
|
||||
assert False
|
||||
|
||||
@property
|
||||
def command_templates(self):
|
||||
if self.toolchain == "Quartus":
|
||||
return self.quartus_command_templates
|
||||
if self.toolchain == "Mistral":
|
||||
return self.mistral_command_templates
|
||||
assert False
|
||||
|
||||
    def add_clock_constraint(self, clock, frequency):
        """Constrain *clock* to *frequency* and keep its net.

        NOTE(review): the ``keep`` attribute presumably prevents synthesis
        from renaming or folding the net so the generated SDC constraint can
        still match it by name — confirm against toolchain documentation.
        """
        super().add_clock_constraint(clock, frequency)
        clock.attrs["keep"] = "true"
|
||||
|
||||
    @property
    def default_clk_constraint(self):
        """Clock constraint for the default clock resource.

        For the built-in Cyclone V oscillator there is no board-level Clock
        resource, so a fixed worst-case constraint is returned instead.
        """
        # Internal high-speed oscillator on Cyclone V devices.
        # It is specified to not be faster than 100MHz, but the actual
        # frequency seems to vary a lot between devices. Measurements
        # of 78 to 84 MHz have been observed.
        if self.default_clk == "cyclonev_oscillator":
            # Only Cyclone V parts (device names starting with "5C") have
            # this oscillator primitive.
            assert self.device.startswith("5C")
            return Clock(100e6)
        # Otherwise, use the defined Clock resource.
        return super().default_clk_constraint
|
||||
|
||||
def create_missing_domain(self, name):
|
||||
if name == "sync" and self.default_clk == "cyclonev_oscillator":
|
||||
# Use the internal high-speed oscillator for Cyclone V devices
|
||||
assert self.device.startswith("5C")
|
||||
m = Module()
|
||||
m.domains += ClockDomain("sync")
|
||||
m.submodules += Instance("cyclonev_oscillator",
|
||||
i_oscena=Const(1),
|
||||
o_clkout=ClockSignal("sync"))
|
||||
return m
|
||||
else:
|
||||
return super().create_missing_domain(name)
|
||||
|
||||
# The altiobuf_* and altddio_* primitives are explained in the following Intel documents:
|
||||
# * https://www.intel.com/content/dam/www/programmable/us/en/pdfs/literature/ug/ug_altiobuf.pdf
|
||||
# * https://www.intel.com/content/dam/www/programmable/us/en/pdfs/literature/ug/ug_altddio.pdf
|
||||
# See also errata mentioned in: https://www.intel.com/content/www/us/en/programmable/support/support-resources/knowledge-base/solutions/rd11192012_735.html.
|
||||
|
||||
@staticmethod
|
||||
def _get_ireg(m, pin, invert):
|
||||
def get_ineg(i):
|
||||
if invert:
|
||||
i_neg = Signal.like(i, name_suffix="_neg")
|
||||
m.d.comb += i.eq(~i_neg)
|
||||
return i_neg
|
||||
else:
|
||||
return i
|
||||
|
||||
if pin.xdr == 0:
|
||||
return get_ineg(pin.i)
|
||||
elif pin.xdr == 1:
|
||||
i_sdr = Signal(pin.width, name="{}_i_sdr")
|
||||
m.submodules += Instance("$dff",
|
||||
p_CLK_POLARITY=1,
|
||||
p_WIDTH=pin.width,
|
||||
i_CLK=pin.i_clk,
|
||||
i_D=i_sdr,
|
||||
o_Q=get_ineg(pin.i),
|
||||
)
|
||||
return i_sdr
|
||||
elif pin.xdr == 2:
|
||||
i_ddr = Signal(pin.width, name="{}_i_ddr".format(pin.name))
|
||||
m.submodules["{}_i_ddr".format(pin.name)] = Instance("altddio_in",
|
||||
p_width=pin.width,
|
||||
i_datain=i_ddr,
|
||||
i_inclock=pin.i_clk,
|
||||
o_dataout_h=get_ineg(pin.i0),
|
||||
o_dataout_l=get_ineg(pin.i1),
|
||||
)
|
||||
return i_ddr
|
||||
assert False
|
||||
|
||||
@staticmethod
|
||||
def _get_oreg(m, pin, invert):
|
||||
def get_oneg(o):
|
||||
if invert:
|
||||
o_neg = Signal.like(o, name_suffix="_neg")
|
||||
m.d.comb += o_neg.eq(~o)
|
||||
return o_neg
|
||||
else:
|
||||
return o
|
||||
|
||||
if pin.xdr == 0:
|
||||
return get_oneg(pin.o)
|
||||
elif pin.xdr == 1:
|
||||
o_sdr = Signal(pin.width, name="{}_o_sdr".format(pin.name))
|
||||
m.submodules += Instance("$dff",
|
||||
p_CLK_POLARITY=1,
|
||||
p_WIDTH=pin.width,
|
||||
i_CLK=pin.o_clk,
|
||||
i_D=get_oneg(pin.o),
|
||||
o_Q=o_sdr,
|
||||
)
|
||||
return o_sdr
|
||||
elif pin.xdr == 2:
|
||||
o_ddr = Signal(pin.width, name="{}_o_ddr".format(pin.name))
|
||||
m.submodules["{}_o_ddr".format(pin.name)] = Instance("altddio_out",
|
||||
p_width=pin.width,
|
||||
o_dataout=o_ddr,
|
||||
i_outclock=pin.o_clk,
|
||||
i_datain_h=get_oneg(pin.o0),
|
||||
i_datain_l=get_oneg(pin.o1),
|
||||
)
|
||||
return o_ddr
|
||||
assert False
|
||||
|
||||
@staticmethod
|
||||
def _get_oereg(m, pin):
|
||||
# altiobuf_ requires an output enable signal for each pin, but pin.oe is 1 bit wide.
|
||||
if pin.xdr == 0:
|
||||
return Repl(pin.oe, pin.width)
|
||||
elif pin.xdr in (1, 2):
|
||||
oe_reg = Signal(pin.width, name="{}_oe_reg".format(pin.name))
|
||||
oe_reg.attrs["useioff"] = "1"
|
||||
m.submodules += Instance("$dff",
|
||||
p_CLK_POLARITY=1,
|
||||
p_WIDTH=pin.width,
|
||||
i_CLK=pin.o_clk,
|
||||
i_D=pin.oe,
|
||||
o_Q=oe_reg,
|
||||
)
|
||||
return oe_reg
|
||||
assert False
|
||||
|
||||
    def get_input(self, pin, port, attrs, invert):
        """Instantiate an ``altiobuf_in`` single-ended input buffer for *pin*.

        Returns a module containing the buffer and any registers created by
        ``_get_ireg``.
        """
        self._check_feature("single-ended input", pin, attrs,
                            valid_xdrs=(0, 1, 2), valid_attrs=True)
        if pin.xdr == 1:
            # NOTE(review): presumably packs the SDR register into the IO
            # cell — confirm against Quartus documentation.
            port.attrs["useioff"] = 1

        m = Module()
        m.submodules[pin.name] = Instance("altiobuf_in",
            p_enable_bus_hold="FALSE",
            p_number_of_channels=pin.width,
            p_use_differential_mode="FALSE",
            i_datain=port.io,
            o_dataout=self._get_ireg(m, pin, invert)
        )
        return m
|
||||
|
||||
    def get_output(self, pin, port, attrs, invert):
        """Instantiate an ``altiobuf_out`` single-ended output buffer for
        *pin* (output enable tied off: ``use_oe="FALSE"``).
        """
        self._check_feature("single-ended output", pin, attrs,
                            valid_xdrs=(0, 1, 2), valid_attrs=True)
        if pin.xdr == 1:
            # NOTE(review): presumably packs the SDR register into the IO
            # cell — confirm against Quartus documentation.
            port.attrs["useioff"] = 1

        m = Module()
        m.submodules[pin.name] = Instance("altiobuf_out",
            p_enable_bus_hold="FALSE",
            p_number_of_channels=pin.width,
            p_use_differential_mode="FALSE",
            p_use_oe="FALSE",
            i_datain=self._get_oreg(m, pin, invert),
            o_dataout=port.io,
        )
        return m
|
||||
|
||||
    def get_tristate(self, pin, port, attrs, invert):
        """Instantiate an ``altiobuf_out`` buffer with output enable
        (``use_oe="TRUE"``) for a single-ended tristate *pin*.
        """
        self._check_feature("single-ended tristate", pin, attrs,
                            valid_xdrs=(0, 1, 2), valid_attrs=True)
        if pin.xdr == 1:
            # NOTE(review): presumably packs the SDR register into the IO
            # cell — confirm against Quartus documentation.
            port.attrs["useioff"] = 1

        m = Module()
        m.submodules[pin.name] = Instance("altiobuf_out",
            p_enable_bus_hold="FALSE",
            p_number_of_channels=pin.width,
            p_use_differential_mode="FALSE",
            p_use_oe="TRUE",
            i_datain=self._get_oreg(m, pin, invert),
            o_dataout=port.io,
            i_oe=self._get_oereg(m, pin)
        )
        return m
|
||||
|
||||
    def get_input_output(self, pin, port, attrs, invert):
        """Instantiate an ``altiobuf_bidir`` buffer for a single-ended
        bidirectional *pin*.
        """
        self._check_feature("single-ended input/output", pin, attrs,
                            valid_xdrs=(0, 1, 2), valid_attrs=True)
        if pin.xdr == 1:
            # NOTE(review): presumably packs the SDR register into the IO
            # cell — confirm against Quartus documentation.
            port.attrs["useioff"] = 1

        m = Module()
        m.submodules[pin.name] = Instance("altiobuf_bidir",
            p_enable_bus_hold="FALSE",
            p_number_of_channels=pin.width,
            p_use_differential_mode="FALSE",
            i_datain=self._get_oreg(m, pin, invert),
            io_dataio=port.io,
            o_dataout=self._get_ireg(m, pin, invert),
            i_oe=self._get_oereg(m, pin),
        )
        return m
|
||||
|
||||
    def get_diff_input(self, pin, port, attrs, invert):
        """Instantiate an ``altiobuf_in`` buffer in differential mode for
        *pin*; ``port.p``/``port.n`` are the true/complement inputs.
        """
        self._check_feature("differential input", pin, attrs,
                            valid_xdrs=(0, 1, 2), valid_attrs=True)
        if pin.xdr == 1:
            # Both halves of the pair carry the IO-cell packing attribute.
            port.p.attrs["useioff"] = 1
            port.n.attrs["useioff"] = 1

        m = Module()
        m.submodules[pin.name] = Instance("altiobuf_in",
            p_enable_bus_hold="FALSE",
            p_number_of_channels=pin.width,
            p_use_differential_mode="TRUE",
            i_datain=port.p,
            i_datain_b=port.n,
            o_dataout=self._get_ireg(m, pin, invert)
        )
        return m
|
||||
|
||||
    def get_diff_output(self, pin, port, attrs, invert):
        """Instantiate an ``altiobuf_out`` buffer in differential mode for
        *pin*; ``port.p``/``port.n`` are the true/complement outputs.
        """
        self._check_feature("differential output", pin, attrs,
                            valid_xdrs=(0, 1, 2), valid_attrs=True)
        if pin.xdr == 1:
            # Both halves of the pair carry the IO-cell packing attribute.
            port.p.attrs["useioff"] = 1
            port.n.attrs["useioff"] = 1

        m = Module()
        m.submodules[pin.name] = Instance("altiobuf_out",
            p_enable_bus_hold="FALSE",
            p_number_of_channels=pin.width,
            p_use_differential_mode="TRUE",
            p_use_oe="FALSE",
            i_datain=self._get_oreg(m, pin, invert),
            o_dataout=port.p,
            o_dataout_b=port.n,
        )
        return m
|
||||
|
||||
    def get_diff_tristate(self, pin, port, attrs, invert):
        """Instantiate an ``altiobuf_out`` buffer in differential mode with
        output enable (``use_oe="TRUE"``) for a tristate *pin*.
        """
        self._check_feature("differential tristate", pin, attrs,
                            valid_xdrs=(0, 1, 2), valid_attrs=True)
        if pin.xdr == 1:
            # Both halves of the pair carry the IO-cell packing attribute.
            port.p.attrs["useioff"] = 1
            port.n.attrs["useioff"] = 1

        m = Module()
        m.submodules[pin.name] = Instance("altiobuf_out",
            p_enable_bus_hold="FALSE",
            p_number_of_channels=pin.width,
            p_use_differential_mode="TRUE",
            p_use_oe="TRUE",
            i_datain=self._get_oreg(m, pin, invert),
            o_dataout=port.p,
            o_dataout_b=port.n,
            i_oe=self._get_oereg(m, pin),
        )
        return m
|
||||
|
||||
    def get_diff_input_output(self, pin, port, attrs, invert):
        """Instantiate an ``altiobuf_bidir`` buffer in differential mode for
        a bidirectional *pin*.
        """
        self._check_feature("differential input/output", pin, attrs,
                            valid_xdrs=(0, 1, 2), valid_attrs=True)
        if pin.xdr == 1:
            # Both halves of the pair carry the IO-cell packing attribute.
            port.p.attrs["useioff"] = 1
            port.n.attrs["useioff"] = 1

        m = Module()
        m.submodules[pin.name] = Instance("altiobuf_bidir",
            p_enable_bus_hold="FALSE",
            p_number_of_channels=pin.width,
            p_use_differential_mode="TRUE",
            i_datain=self._get_oreg(m, pin, invert),
            io_dataio=port.p,
            io_dataio_b=port.n,
            o_dataout=self._get_ireg(m, pin, invert),
            i_oe=self._get_oereg(m, pin),
        )
        return m
|
||||
|
||||
# The altera_std_synchronizer{,_bundle} megafunctions embed SDC constraints that mark false
|
||||
# paths, so use them instead of our default implementation.
|
||||
|
||||
    def get_ff_sync(self, ff_sync):
        """Lower an FFSynchronizer to ``altera_std_synchronizer_bundle``.

        The vendor megafunction embeds SDC false-path constraints, so it is
        preferred over the generic flip-flop chain implementation.
        """
        return Instance("altera_std_synchronizer_bundle",
            p_width=len(ff_sync.i),
            p_depth=ff_sync._stages,
            i_clk=ClockSignal(ff_sync._o_domain),
            # Reset is tied inactive; only the data path is synchronized.
            i_reset_n=Const(1),
            i_din=ff_sync.i,
            o_dout=ff_sync.o,
        )
|
||||
|
||||
def get_async_ff_sync(self, async_ff_sync):
|
||||
m = Module()
|
||||
sync_output = Signal()
|
||||
if async_ff_sync._edge == "pos":
|
||||
m.submodules += Instance("altera_std_synchronizer",
|
||||
p_depth=async_ff_sync._stages,
|
||||
i_clk=ClockSignal(async_ff_sync._o_domain),
|
||||
i_reset_n=~async_ff_sync.i,
|
||||
i_din=Const(1),
|
||||
o_dout=sync_output,
|
||||
)
|
||||
else:
|
||||
m.submodules += Instance("altera_std_synchronizer",
|
||||
p_depth=async_ff_sync._stages,
|
||||
i_clk=ClockSignal(async_ff_sync._o_domain),
|
||||
i_reset_n=async_ff_sync.i,
|
||||
i_din=Const(1),
|
||||
o_dout=sync_output,
|
||||
)
|
||||
m.d.comb += async_ff_sync.o.eq(~sync_output)
|
||||
return m
|
||||
# Deprecation shim: this module now lives in the Amaranth package; importing
# it under the old nmigen name emits a warning pointing at the new location.
import warnings
warnings.warn("instead of nmigen.vendor.intel, use amaranth.vendor.intel",
              DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
669
nmigen/vendor/lattice_ecp5.py
vendored
669
nmigen/vendor/lattice_ecp5.py
vendored
|
|
@ -1,666 +1,7 @@
|
|||
from abc import abstractproperty
|
||||
|
||||
from ..hdl import *
|
||||
from ..build import *
|
||||
from amaranth.vendor.lattice_ecp5 import *
|
||||
from amaranth.vendor.lattice_ecp5 import __all__
|
||||
|
||||
|
||||
__all__ = ["LatticeECP5Platform"]
|
||||
|
||||
|
||||
class LatticeECP5Platform(TemplatedPlatform):
|
||||
"""
|
||||
Trellis toolchain
|
||||
-----------------
|
||||
|
||||
Required tools:
|
||||
* ``yosys``
|
||||
* ``nextpnr-ecp5``
|
||||
* ``ecppack``
|
||||
|
||||
The environment is populated by running the script specified in the environment variable
|
||||
``NMIGEN_ENV_Trellis``, if present.
|
||||
|
||||
Available overrides:
|
||||
* ``verbose``: enables logging of informational messages to standard error.
|
||||
* ``read_verilog_opts``: adds options for ``read_verilog`` Yosys command.
|
||||
* ``synth_opts``: adds options for ``synth_ecp5`` Yosys command.
|
||||
* ``script_after_read``: inserts commands after ``read_ilang`` in Yosys script.
|
||||
* ``script_after_synth``: inserts commands after ``synth_ecp5`` in Yosys script.
|
||||
* ``yosys_opts``: adds extra options for ``yosys``.
|
||||
* ``nextpnr_opts``: adds extra options for ``nextpnr-ecp5``.
|
||||
* ``ecppack_opts``: adds extra options for ``ecppack``.
|
||||
* ``add_preferences``: inserts commands at the end of the LPF file.
|
||||
|
||||
Build products:
|
||||
* ``{{name}}.rpt``: Yosys log.
|
||||
* ``{{name}}.json``: synthesized RTL.
|
||||
* ``{{name}}.tim``: nextpnr log.
|
||||
* ``{{name}}.config``: ASCII bitstream.
|
||||
* ``{{name}}.bit``: binary bitstream.
|
||||
* ``{{name}}.svf``: JTAG programming vector.
|
||||
|
||||
Diamond toolchain
|
||||
-----------------
|
||||
|
||||
Required tools:
|
||||
* ``pnmainc``
|
||||
* ``ddtcmd``
|
||||
|
||||
The environment is populated by running the script specified in the environment variable
|
||||
``NMIGEN_ENV_Diamond``, if present. On Linux, diamond_env as provided by Diamond
|
||||
itself is a good candidate. On Windows, the following script (named ``diamond_env.bat``,
|
||||
for instance) is known to work::
|
||||
|
||||
@echo off
|
||||
set PATH=C:\\lscc\\diamond\\%DIAMOND_VERSION%\\bin\\nt64;%PATH%
|
||||
|
||||
Available overrides:
|
||||
* ``script_project``: inserts commands before ``prj_project save`` in Tcl script.
|
||||
* ``script_after_export``: inserts commands after ``prj_run Export`` in Tcl script.
|
||||
* ``add_preferences``: inserts commands at the end of the LPF file.
|
||||
* ``add_constraints``: inserts commands at the end of the XDC file.
|
||||
|
||||
Build products:
|
||||
* ``{{name}}_impl/{{name}}_impl.htm``: consolidated log.
|
||||
* ``{{name}}.bit``: binary bitstream.
|
||||
* ``{{name}}.svf``: JTAG programming vector.
|
||||
"""
|
||||
|
||||
toolchain = None # selected when creating platform
|
||||
|
||||
device = abstractproperty()
|
||||
package = abstractproperty()
|
||||
speed = abstractproperty()
|
||||
grade = "C" # [C]ommercial, [I]ndustrial
|
||||
|
||||
# Trellis templates
|
||||
|
||||
_nextpnr_device_options = {
|
||||
"LFE5U-12F": "--12k",
|
||||
"LFE5U-25F": "--25k",
|
||||
"LFE5U-45F": "--45k",
|
||||
"LFE5U-85F": "--85k",
|
||||
"LFE5UM-25F": "--um-25k",
|
||||
"LFE5UM-45F": "--um-45k",
|
||||
"LFE5UM-85F": "--um-85k",
|
||||
"LFE5UM5G-25F": "--um5g-25k",
|
||||
"LFE5UM5G-45F": "--um5g-45k",
|
||||
"LFE5UM5G-85F": "--um5g-85k",
|
||||
}
|
||||
_nextpnr_package_options = {
|
||||
"BG256": "caBGA256",
|
||||
"MG285": "csfBGA285",
|
||||
"BG381": "caBGA381",
|
||||
"BG554": "caBGA554",
|
||||
"BG756": "caBGA756",
|
||||
}
|
||||
|
||||
_trellis_required_tools = [
|
||||
"yosys",
|
||||
"nextpnr-ecp5",
|
||||
"ecppack"
|
||||
]
|
||||
_trellis_file_templates = {
|
||||
**TemplatedPlatform.build_script_templates,
|
||||
"{{name}}.il": r"""
|
||||
# {{autogenerated}}
|
||||
{{emit_rtlil()}}
|
||||
""",
|
||||
"{{name}}.debug.v": r"""
|
||||
/* {{autogenerated}} */
|
||||
{{emit_debug_verilog()}}
|
||||
""",
|
||||
"{{name}}.ys": r"""
|
||||
# {{autogenerated}}
|
||||
{% for file in platform.iter_files(".v") -%}
|
||||
read_verilog {{get_override("read_verilog_opts")|options}} {{file}}
|
||||
{% endfor %}
|
||||
{% for file in platform.iter_files(".sv") -%}
|
||||
read_verilog -sv {{get_override("read_verilog_opts")|options}} {{file}}
|
||||
{% endfor %}
|
||||
{% for file in platform.iter_files(".il") -%}
|
||||
read_ilang {{file}}
|
||||
{% endfor %}
|
||||
read_ilang {{name}}.il
|
||||
delete w:$verilog_initial_trigger
|
||||
{{get_override("script_after_read")|default("# (script_after_read placeholder)")}}
|
||||
synth_ecp5 {{get_override("synth_opts")|options}} -top {{name}}
|
||||
{{get_override("script_after_synth")|default("# (script_after_synth placeholder)")}}
|
||||
write_json {{name}}.json
|
||||
""",
|
||||
"{{name}}.lpf": r"""
|
||||
# {{autogenerated}}
|
||||
BLOCK ASYNCPATHS;
|
||||
BLOCK RESETPATHS;
|
||||
{% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%}
|
||||
LOCATE COMP "{{port_name}}" SITE "{{pin_name}}";
|
||||
{% if attrs -%}
|
||||
IOBUF PORT "{{port_name}}"
|
||||
{%- for key, value in attrs.items() %} {{key}}={{value}}{% endfor %};
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%}
|
||||
{% if port_signal is not none -%}
|
||||
FREQUENCY PORT "{{port_signal.name}}" {{frequency}} HZ;
|
||||
{% else -%}
|
||||
FREQUENCY NET "{{net_signal|hierarchy(".")}}" {{frequency}} HZ;
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{{get_override("add_preferences")|default("# (add_preferences placeholder)")}}
|
||||
"""
|
||||
}
|
||||
_trellis_command_templates = [
|
||||
r"""
|
||||
{{invoke_tool("yosys")}}
|
||||
{{quiet("-q")}}
|
||||
{{get_override("yosys_opts")|options}}
|
||||
-l {{name}}.rpt
|
||||
{{name}}.ys
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("nextpnr-ecp5")}}
|
||||
{{quiet("--quiet")}}
|
||||
{{get_override("nextpnr_opts")|options}}
|
||||
--log {{name}}.tim
|
||||
{{platform._nextpnr_device_options[platform.device]}}
|
||||
--package {{platform._nextpnr_package_options[platform.package]|upper}}
|
||||
--speed {{platform.speed}}
|
||||
--json {{name}}.json
|
||||
--lpf {{name}}.lpf
|
||||
--textcfg {{name}}.config
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("ecppack")}}
|
||||
{{verbose("--verbose")}}
|
||||
{{get_override("ecppack_opts")|options}}
|
||||
--input {{name}}.config
|
||||
--bit {{name}}.bit
|
||||
--svf {{name}}.svf
|
||||
"""
|
||||
]
|
||||
|
||||
# Diamond templates
|
||||
|
||||
_diamond_required_tools = [
|
||||
"pnmainc",
|
||||
"ddtcmd"
|
||||
]
|
||||
_diamond_file_templates = {
|
||||
**TemplatedPlatform.build_script_templates,
|
||||
"build_{{name}}.sh": r"""
|
||||
# {{autogenerated}}
|
||||
set -e{{verbose("x")}}
|
||||
if [ -z "$BASH" ] ; then exec /bin/bash "$0" "$@"; fi
|
||||
if [ -n "${{platform._toolchain_env_var}}" ]; then
|
||||
bindir=$(dirname "${{platform._toolchain_env_var}}")
|
||||
. "${{platform._toolchain_env_var}}"
|
||||
fi
|
||||
{{emit_commands("sh")}}
|
||||
""",
|
||||
"{{name}}.v": r"""
|
||||
/* {{autogenerated}} */
|
||||
{{emit_verilog()}}
|
||||
""",
|
||||
"{{name}}.debug.v": r"""
|
||||
/* {{autogenerated}} */
|
||||
{{emit_debug_verilog()}}
|
||||
""",
|
||||
"{{name}}.tcl": r"""
|
||||
prj_project new -name {{name}} -impl impl -impl_dir {{name}}_impl \
|
||||
-dev {{platform.device}}-{{platform.speed}}{{platform.package}}{{platform.grade}} \
|
||||
-lpf {{name}}.lpf \
|
||||
-synthesis synplify
|
||||
{% for file in platform.iter_files(".v", ".sv", ".vhd", ".vhdl") -%}
|
||||
prj_src add {{file|tcl_escape}}
|
||||
{% endfor %}
|
||||
prj_src add {{name}}.v
|
||||
prj_impl option top {{name}}
|
||||
prj_src add {{name}}.sdc
|
||||
{{get_override("script_project")|default("# (script_project placeholder)")}}
|
||||
prj_project save
|
||||
prj_run Synthesis -impl impl
|
||||
prj_run Translate -impl impl
|
||||
prj_run Map -impl impl
|
||||
prj_run PAR -impl impl
|
||||
prj_run Export -impl impl -task Bitgen
|
||||
{{get_override("script_after_export")|default("# (script_after_export placeholder)")}}
|
||||
""",
|
||||
"{{name}}.lpf": r"""
|
||||
# {{autogenerated}}
|
||||
BLOCK ASYNCPATHS;
|
||||
BLOCK RESETPATHS;
|
||||
{% for port_name, pin_name, extras in platform.iter_port_constraints_bits() -%}
|
||||
LOCATE COMP "{{port_name}}" SITE "{{pin_name}}";
|
||||
IOBUF PORT "{{port_name}}"
|
||||
{%- for key, value in extras.items() %} {{key}}={{value}}{% endfor %};
|
||||
{% endfor %}
|
||||
{{get_override("add_preferences")|default("# (add_preferences placeholder)")}}
|
||||
""",
|
||||
"{{name}}.sdc": r"""
|
||||
{% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%}
|
||||
{% if port_signal is not none -%}
|
||||
create_clock -name {{port_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_ports {{port_signal.name|tcl_escape}}]
|
||||
{% else -%}
|
||||
create_clock -name {{net_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_nets {{net_signal|hierarchy("/")|tcl_escape}}]
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{{get_override("add_constraints")|default("# (add_constraints placeholder)")}}
|
||||
""",
|
||||
}
|
||||
_diamond_command_templates = [
|
||||
# These don't have any usable command-line option overrides.
|
||||
r"""
|
||||
{{invoke_tool("pnmainc")}}
|
||||
{{name}}.tcl
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("ddtcmd")}}
|
||||
-oft -bit
|
||||
-if {{name}}_impl/{{name}}_impl.bit -of {{name}}.bit
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("ddtcmd")}}
|
||||
-oft -svfsingle -revd -op "Fast Program"
|
||||
-if {{name}}_impl/{{name}}_impl.bit -of {{name}}.svf
|
||||
""",
|
||||
]
|
||||
|
||||
# Common logic
|
||||
|
||||
def __init__(self, *, toolchain="Trellis"):
|
||||
super().__init__()
|
||||
|
||||
assert toolchain in ("Trellis", "Diamond")
|
||||
self.toolchain = toolchain
|
||||
|
||||
@property
|
||||
def required_tools(self):
|
||||
if self.toolchain == "Trellis":
|
||||
return self._trellis_required_tools
|
||||
if self.toolchain == "Diamond":
|
||||
return self._diamond_required_tools
|
||||
assert False
|
||||
|
||||
@property
|
||||
def file_templates(self):
|
||||
if self.toolchain == "Trellis":
|
||||
return self._trellis_file_templates
|
||||
if self.toolchain == "Diamond":
|
||||
return self._diamond_file_templates
|
||||
assert False
|
||||
|
||||
@property
|
||||
def command_templates(self):
|
||||
if self.toolchain == "Trellis":
|
||||
return self._trellis_command_templates
|
||||
if self.toolchain == "Diamond":
|
||||
return self._diamond_command_templates
|
||||
assert False
|
||||
|
||||
@property
|
||||
def default_clk_constraint(self):
|
||||
if self.default_clk == "OSCG":
|
||||
return Clock(310e6 / self.oscg_div)
|
||||
return super().default_clk_constraint
|
||||
|
||||
def create_missing_domain(self, name):
|
||||
# Lattice ECP5 devices have two global set/reset signals: PUR, which is driven at startup
|
||||
# by the configuration logic and unconditionally resets every storage element, and GSR,
|
||||
# which is driven by user logic and each storage element may be configured as affected or
|
||||
# unaffected by GSR. PUR is purely asynchronous, so even though it is a low-skew global
|
||||
# network, its deassertion may violate a setup/hold constraint with relation to a user
|
||||
# clock. To avoid this, a GSR/SGSR instance should be driven synchronized to user clock.
|
||||
if name == "sync" and self.default_clk is not None:
|
||||
m = Module()
|
||||
if self.default_clk == "OSCG":
|
||||
if not hasattr(self, "oscg_div"):
|
||||
raise ValueError("OSCG divider (oscg_div) must be an integer between 2 "
|
||||
"and 128")
|
||||
if not isinstance(self.oscg_div, int) or self.oscg_div < 2 or self.oscg_div > 128:
|
||||
raise ValueError("OSCG divider (oscg_div) must be an integer between 2 "
|
||||
"and 128, not {!r}"
|
||||
.format(self.oscg_div))
|
||||
clk_i = Signal()
|
||||
m.submodules += Instance("OSCG", p_DIV=self.oscg_div, o_OSC=clk_i)
|
||||
else:
|
||||
clk_i = self.request(self.default_clk).i
|
||||
if self.default_rst is not None:
|
||||
rst_i = self.request(self.default_rst).i
|
||||
else:
|
||||
rst_i = Const(0)
|
||||
|
||||
gsr0 = Signal()
|
||||
gsr1 = Signal()
|
||||
# There is no end-of-startup signal on ECP5, but PUR is released after IOB enable, so
|
||||
# a simple reset synchronizer (with PUR as the asynchronous reset) does the job.
|
||||
m.submodules += [
|
||||
Instance("FD1S3AX", p_GSR="DISABLED", i_CK=clk_i, i_D=~rst_i, o_Q=gsr0),
|
||||
Instance("FD1S3AX", p_GSR="DISABLED", i_CK=clk_i, i_D=gsr0, o_Q=gsr1),
|
||||
# Although we already synchronize the reset input to user clock, SGSR has dedicated
|
||||
# clock routing to the center of the FPGA; use that just in case it turns out to be
|
||||
# more reliable. (None of this is documented.)
|
||||
Instance("SGSR", i_CLK=clk_i, i_GSR=gsr1),
|
||||
]
|
||||
# GSR implicitly connects to every appropriate storage element. As such, the sync
|
||||
# domain is reset-less; domains driven by other clocks would need to have dedicated
|
||||
# reset circuitry or otherwise meet setup/hold constraints on their own.
|
||||
m.domains += ClockDomain("sync", reset_less=True)
|
||||
m.d.comb += ClockSignal("sync").eq(clk_i)
|
||||
return m
|
||||
|
||||
_single_ended_io_types = [
|
||||
"HSUL12", "LVCMOS12", "LVCMOS15", "LVCMOS18", "LVCMOS25", "LVCMOS33", "LVTTL33",
|
||||
"SSTL135_I", "SSTL135_II", "SSTL15_I", "SSTL15_II", "SSTL18_I", "SSTL18_II",
|
||||
]
|
||||
_differential_io_types = [
|
||||
"BLVDS25", "BLVDS25E", "HSUL12D", "LVCMOS18D", "LVCMOS25D", "LVCMOS33D",
|
||||
"LVDS", "LVDS25E", "LVPECL33", "LVPECL33E", "LVTTL33D", "MLVDS", "MLVDS25E",
|
||||
"SLVS", "SSTL135D_I", "SSTL135D_II", "SSTL15D_I", "SSTL15D_II", "SSTL18D_I",
|
||||
"SSTL18D_II", "SUBLVDS",
|
||||
]
|
||||
|
||||
def should_skip_port_component(self, port, attrs, component):
|
||||
# On ECP5, a differential IO is placed by only instantiating an IO buffer primitive at
|
||||
# the PIOA or PIOC location, which is always the non-inverting pin.
|
||||
if attrs.get("IO_TYPE", "LVCMOS25") in self._differential_io_types and component == "n":
|
||||
return True
|
||||
return False
|
||||
|
||||
    def _get_xdr_buffer(self, m, pin, *, i_invert=False, o_invert=False):
        """Add input/output gearing registers for *pin* to module *m*.

        Depending on ``pin.xdr`` (0, 1, 2, 4 or 7), plain registers, DDR
        primitives, or 1:4/1:7 gearbox primitives are instantiated. Returns
        an ``(i, o, t)`` tuple of signals that the caller connects to the
        actual IO buffer primitive; entries not applicable to ``pin.dir``
        are None. ``t`` is an active-low tristate control (driven from
        ``~pin.oe``).
        """
        def get_ireg(clk, d, q):
            # Plain input register: one IFS1P3DX per bit, SP tied high, CD tied low.
            for bit in range(len(q)):
                m.submodules += Instance("IFS1P3DX",
                    i_SCLK=clk,
                    i_SP=Const(1),
                    i_CD=Const(0),
                    i_D=d[bit],
                    o_Q=q[bit]
                )

        def get_oreg(clk, d, q):
            # Plain output register: one OFS1P3DX per bit.
            for bit in range(len(q)):
                m.submodules += Instance("OFS1P3DX",
                    i_SCLK=clk,
                    i_SP=Const(1),
                    i_CD=Const(0),
                    i_D=d[bit],
                    o_Q=q[bit]
                )

        def get_oereg(clk, oe, q):
            # Registers the (single-bit) output enable once per pad bit.
            for bit in range(len(q)):
                m.submodules += Instance("OFS1P3DX",
                    i_SCLK=clk,
                    i_SP=Const(1),
                    i_CD=Const(0),
                    i_D=oe,
                    o_Q=q[bit]
                )

        def get_iddr(sclk, d, q0, q1):
            # 1:2 input DDR.
            for bit in range(len(d)):
                m.submodules += Instance("IDDRX1F",
                    i_SCLK=sclk,
                    i_RST=Const(0),
                    i_D=d[bit],
                    o_Q0=q0[bit], o_Q1=q1[bit]
                )

        def get_iddrx2(sclk, eclk, d, q0, q1, q2, q3):
            # 1:4 input gearbox; requires an edge clock (eclk) besides the system clock.
            for bit in range(len(d)):
                m.submodules += Instance("IDDRX2F",
                    i_SCLK=sclk,
                    i_ECLK=eclk,
                    i_RST=Const(0),
                    i_D=d[bit],
                    o_Q0=q0[bit], o_Q1=q1[bit], o_Q2=q2[bit], o_Q3=q3[bit]
                )

        def get_iddr71b(sclk, eclk, d, q0, q1, q2, q3, q4, q5, q6):
            # 1:7 input gearbox.
            for bit in range(len(d)):
                m.submodules += Instance("IDDR71B",
                    i_SCLK=sclk,
                    i_ECLK=eclk,
                    i_RST=Const(0),
                    i_D=d[bit],
                    o_Q0=q0[bit], o_Q1=q1[bit], o_Q2=q2[bit], o_Q3=q3[bit],
                    o_Q4=q4[bit], o_Q5=q5[bit], o_Q6=q6[bit],
                )

        def get_oddr(sclk, d0, d1, q):
            # 1:2 output DDR.
            for bit in range(len(q)):
                m.submodules += Instance("ODDRX1F",
                    i_SCLK=sclk,
                    i_RST=Const(0),
                    i_D0=d0[bit], i_D1=d1[bit],
                    o_Q=q[bit]
                )

        def get_oddrx2(sclk, eclk, d0, d1, d2, d3, q):
            # 1:4 output gearbox.
            for bit in range(len(q)):
                m.submodules += Instance("ODDRX2F",
                    i_SCLK=sclk,
                    i_ECLK=eclk,
                    i_RST=Const(0),
                    i_D0=d0[bit], i_D1=d1[bit], i_D2=d2[bit], i_D3=d3[bit],
                    o_Q=q[bit]
                )

        def get_oddr71b(sclk, eclk, d0, d1, d2, d3, d4, d5, d6, q):
            # 1:7 output gearbox.
            for bit in range(len(q)):
                m.submodules += Instance("ODDR71B",
                    i_SCLK=sclk,
                    i_ECLK=eclk,
                    i_RST=Const(0),
                    i_D0=d0[bit], i_D1=d1[bit], i_D2=d2[bit], i_D3=d3[bit],
                    i_D4=d4[bit], i_D5=d5[bit], i_D6=d6[bit],
                    o_Q=q[bit]
                )

        def get_ineg(z, invert):
            # Optionally inserts a fabric inverter on an input path.
            if invert:
                a = Signal.like(z, name_suffix="_n")
                m.d.comb += z.eq(~a)
                return a
            else:
                return z

        def get_oneg(a, invert):
            # Optionally inserts a fabric inverter on an output path.
            if invert:
                z = Signal.like(a, name_suffix="_n")
                m.d.comb += z.eq(~a)
                return z
            else:
                return a

        # Wrap the user-facing pin signals with inverters where requested.
        if "i" in pin.dir:
            if pin.xdr < 2:
                pin_i = get_ineg(pin.i, i_invert)
            elif pin.xdr == 2:
                pin_i0 = get_ineg(pin.i0, i_invert)
                pin_i1 = get_ineg(pin.i1, i_invert)
            elif pin.xdr == 4:
                pin_i0 = get_ineg(pin.i0, i_invert)
                pin_i1 = get_ineg(pin.i1, i_invert)
                pin_i2 = get_ineg(pin.i2, i_invert)
                pin_i3 = get_ineg(pin.i3, i_invert)
            elif pin.xdr == 7:
                pin_i0 = get_ineg(pin.i0, i_invert)
                pin_i1 = get_ineg(pin.i1, i_invert)
                pin_i2 = get_ineg(pin.i2, i_invert)
                pin_i3 = get_ineg(pin.i3, i_invert)
                pin_i4 = get_ineg(pin.i4, i_invert)
                pin_i5 = get_ineg(pin.i5, i_invert)
                pin_i6 = get_ineg(pin.i6, i_invert)
        if "o" in pin.dir:
            if pin.xdr < 2:
                pin_o = get_oneg(pin.o, o_invert)
            elif pin.xdr == 2:
                pin_o0 = get_oneg(pin.o0, o_invert)
                pin_o1 = get_oneg(pin.o1, o_invert)
            elif pin.xdr == 4:
                pin_o0 = get_oneg(pin.o0, o_invert)
                pin_o1 = get_oneg(pin.o1, o_invert)
                pin_o2 = get_oneg(pin.o2, o_invert)
                pin_o3 = get_oneg(pin.o3, o_invert)
            elif pin.xdr == 7:
                pin_o0 = get_oneg(pin.o0, o_invert)
                pin_o1 = get_oneg(pin.o1, o_invert)
                pin_o2 = get_oneg(pin.o2, o_invert)
                pin_o3 = get_oneg(pin.o3, o_invert)
                pin_o4 = get_oneg(pin.o4, o_invert)
                pin_o5 = get_oneg(pin.o5, o_invert)
                pin_o6 = get_oneg(pin.o6, o_invert)

        # Signals facing the IO buffer primitive (created only as needed).
        i = o = t = None
        if "i" in pin.dir:
            i = Signal(pin.width, name="{}_xdr_i".format(pin.name))
        if "o" in pin.dir:
            o = Signal(pin.width, name="{}_xdr_o".format(pin.name))
        if pin.dir in ("oe", "io"):
            t = Signal(pin.width, name="{}_xdr_t".format(pin.name))

        if pin.xdr == 0:
            # Fully combinatorial path; no registers are instantiated.
            if "i" in pin.dir:
                i = pin_i
            if "o" in pin.dir:
                o = pin_o
            if pin.dir in ("oe", "io"):
                t = Repl(~pin.oe, pin.width)
        elif pin.xdr == 1:
            if "i" in pin.dir:
                get_ireg(pin.i_clk, i, pin_i)
            if "o" in pin.dir:
                get_oreg(pin.o_clk, pin_o, o)
            if pin.dir in ("oe", "io"):
                get_oereg(pin.o_clk, ~pin.oe, t)
        elif pin.xdr == 2:
            if "i" in pin.dir:
                get_iddr(pin.i_clk, i, pin_i0, pin_i1)
            if "o" in pin.dir:
                get_oddr(pin.o_clk, pin_o0, pin_o1, o)
            if pin.dir in ("oe", "io"):
                get_oereg(pin.o_clk, ~pin.oe, t)
        elif pin.xdr == 4:
            if "i" in pin.dir:
                get_iddrx2(pin.i_clk, pin.i_fclk, i, pin_i0, pin_i1, pin_i2, pin_i3)
            if "o" in pin.dir:
                get_oddrx2(pin.o_clk, pin.o_fclk, pin_o0, pin_o1, pin_o2, pin_o3, o)
            if pin.dir in ("oe", "io"):
                get_oereg(pin.o_clk, ~pin.oe, t)
        elif pin.xdr == 7:
            if "i" in pin.dir:
                get_iddr71b(pin.i_clk, pin.i_fclk, i, pin_i0, pin_i1, pin_i2, pin_i3, pin_i4, pin_i5, pin_i6)
            if "o" in pin.dir:
                get_oddr71b(pin.o_clk, pin.o_fclk, pin_o0, pin_o1, pin_o2, pin_o3, pin_o4, pin_o5, pin_o6, o)
            if pin.dir in ("oe", "io"):
                get_oereg(pin.o_clk, ~pin.oe, t)
        else:
            # Unsupported gearing ratios are rejected earlier by _check_feature.
            assert False

        return (i, o, t)
|
||||
|
||||
def get_input(self, pin, port, attrs, invert):
|
||||
self._check_feature("single-ended input", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True)
|
||||
m = Module()
|
||||
i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert)
|
||||
for bit in range(pin.width):
|
||||
m.submodules["{}_{}".format(pin.name, bit)] = Instance("IB",
|
||||
i_I=port.io[bit],
|
||||
o_O=i[bit]
|
||||
)
|
||||
return m
|
||||
|
||||
def get_output(self, pin, port, attrs, invert):
|
||||
self._check_feature("single-ended output", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True)
|
||||
m = Module()
|
||||
i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert)
|
||||
for bit in range(pin.width):
|
||||
m.submodules["{}_{}".format(pin.name, bit)] = Instance("OB",
|
||||
i_I=o[bit],
|
||||
o_O=port.io[bit]
|
||||
)
|
||||
return m
|
||||
|
||||
def get_tristate(self, pin, port, attrs, invert):
|
||||
self._check_feature("single-ended tristate", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True)
|
||||
m = Module()
|
||||
i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert)
|
||||
for bit in range(pin.width):
|
||||
m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBZ",
|
||||
i_T=t[bit],
|
||||
i_I=o[bit],
|
||||
o_O=port.io[bit]
|
||||
)
|
||||
return m
|
||||
|
||||
def get_input_output(self, pin, port, attrs, invert):
|
||||
self._check_feature("single-ended input/output", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True)
|
||||
m = Module()
|
||||
i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert, o_invert=invert)
|
||||
for bit in range(pin.width):
|
||||
m.submodules["{}_{}".format(pin.name, bit)] = Instance("BB",
|
||||
i_T=t[bit],
|
||||
i_I=o[bit],
|
||||
o_O=i[bit],
|
||||
io_B=port.io[bit]
|
||||
)
|
||||
return m
|
||||
|
||||
def get_diff_input(self, pin, port, attrs, invert):
|
||||
self._check_feature("differential input", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True)
|
||||
m = Module()
|
||||
i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert)
|
||||
for bit in range(pin.width):
|
||||
m.submodules["{}_{}".format(pin.name, bit)] = Instance("IB",
|
||||
i_I=port.p[bit],
|
||||
o_O=i[bit]
|
||||
)
|
||||
return m
|
||||
|
||||
def get_diff_output(self, pin, port, attrs, invert):
|
||||
self._check_feature("differential output", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True)
|
||||
m = Module()
|
||||
i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert)
|
||||
for bit in range(pin.width):
|
||||
m.submodules["{}_{}".format(pin.name, bit)] = Instance("OB",
|
||||
i_I=o[bit],
|
||||
o_O=port.p[bit],
|
||||
)
|
||||
return m
|
||||
|
||||
def get_diff_tristate(self, pin, port, attrs, invert):
|
||||
self._check_feature("differential tristate", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True)
|
||||
m = Module()
|
||||
i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert)
|
||||
for bit in range(pin.width):
|
||||
m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBZ",
|
||||
i_T=t[bit],
|
||||
i_I=o[bit],
|
||||
o_O=port.p[bit],
|
||||
)
|
||||
return m
|
||||
|
||||
def get_diff_input_output(self, pin, port, attrs, invert):
|
||||
self._check_feature("differential input/output", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2, 4, 7), valid_attrs=True)
|
||||
m = Module()
|
||||
i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert, o_invert=invert)
|
||||
for bit in range(pin.width):
|
||||
m.submodules["{}_{}".format(pin.name, bit)] = Instance("BB",
|
||||
i_T=t[bit],
|
||||
i_I=o[bit],
|
||||
o_O=i[bit],
|
||||
io_B=port.p[bit],
|
||||
)
|
||||
return m
|
||||
|
||||
# CDC primitives are not currently specialized for ECP5.
|
||||
# While Diamond supports false path constraints, nextpnr-ecp5 does not.
|
||||
# Backwards-compatibility shim: importing this module still works, but warns
# that the package has been renamed from nMigen to Amaranth HDL.
import warnings
warnings.warn("instead of nmigen.vendor.lattice_ecp5, use amaranth.vendor.lattice_ecp5",
              DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
630
nmigen/vendor/lattice_ice40.py
vendored
630
nmigen/vendor/lattice_ice40.py
vendored
|
|
@ -1,627 +1,7 @@
|
|||
from abc import abstractproperty
|
||||
|
||||
from ..hdl import *
|
||||
from ..lib.cdc import ResetSynchronizer
|
||||
from ..build import *
|
||||
from amaranth.vendor.lattice_ice40 import *
|
||||
from amaranth.vendor.lattice_ice40 import __all__
|
||||
|
||||
|
||||
__all__ = ["LatticeICE40Platform"]
|
||||
|
||||
|
||||
class LatticeICE40Platform(TemplatedPlatform):
|
||||
"""
|
||||
IceStorm toolchain
|
||||
------------------
|
||||
|
||||
Required tools:
|
||||
* ``yosys``
|
||||
* ``nextpnr-ice40``
|
||||
* ``icepack``
|
||||
|
||||
The environment is populated by running the script specified in the environment variable
|
||||
``NMIGEN_ENV_IceStorm``, if present.
|
||||
|
||||
Available overrides:
|
||||
* ``verbose``: enables logging of informational messages to standard error.
|
||||
* ``read_verilog_opts``: adds options for ``read_verilog`` Yosys command.
|
||||
* ``synth_opts``: adds options for ``synth_ice40`` Yosys command.
|
||||
* ``script_after_read``: inserts commands after ``read_ilang`` in Yosys script.
|
||||
* ``script_after_synth``: inserts commands after ``synth_ice40`` in Yosys script.
|
||||
* ``yosys_opts``: adds extra options for ``yosys``.
|
||||
* ``nextpnr_opts``: adds extra options for ``nextpnr-ice40``.
|
||||
* ``add_pre_pack``: inserts commands at the end in pre-pack Python script.
|
||||
* ``add_constraints``: inserts commands at the end in the PCF file.
|
||||
|
||||
Build products:
|
||||
* ``{{name}}.rpt``: Yosys log.
|
||||
* ``{{name}}.json``: synthesized RTL.
|
||||
* ``{{name}}.tim``: nextpnr log.
|
||||
* ``{{name}}.asc``: ASCII bitstream.
|
||||
* ``{{name}}.bin``: binary bitstream.
|
||||
|
||||
iCECube2 toolchain
|
||||
------------------
|
||||
|
||||
This toolchain comes in two variants: ``LSE-iCECube2`` and ``Synplify-iCECube2``.
|
||||
|
||||
Required tools:
|
||||
* iCECube2 toolchain
|
||||
* ``tclsh``
|
||||
|
||||
The environment is populated by setting the necessary environment variables based on
|
||||
``NMIGEN_ENV_iCECube2``, which must point to the root of the iCECube2 installation, and
|
||||
is required.
|
||||
|
||||
Available overrides:
|
||||
* ``verbose``: enables logging of informational messages to standard error.
|
||||
* ``lse_opts``: adds options for LSE.
|
||||
* ``script_after_add``: inserts commands after ``add_file`` in Synplify Tcl script.
|
||||
* ``script_after_options``: inserts commands after ``set_option`` in Synplify Tcl script.
|
||||
* ``add_constraints``: inserts commands in SDC file.
|
||||
* ``script_after_flow``: inserts commands after ``run_sbt_backend_auto`` in SBT
|
||||
Tcl script.
|
||||
|
||||
Build products:
|
||||
* ``{{name}}_lse.log`` (LSE) or ``{{name}}_design/{{name}}.htm`` (Synplify): synthesis log.
|
||||
* ``sbt/outputs/router/{{name}}_timing.rpt``: timing report.
|
||||
* ``{{name}}.edf``: EDIF netlist.
|
||||
* ``{{name}}.bin``: binary bitstream.
|
||||
"""
|
||||
|
||||
toolchain = None # selected when creating platform
|
||||
|
||||
device = abstractproperty()
|
||||
package = abstractproperty()
|
||||
|
||||
# IceStorm templates
|
||||
|
||||
    # Maps a device name to the nextpnr-ice40 architecture option selecting it.
    # Several devices map to the same option (e.g. iCE40LP4K uses --lp8k).
    _nextpnr_device_options = {
        "iCE40LP384": "--lp384",
        "iCE40LP1K": "--lp1k",
        "iCE40LP4K": "--lp8k",
        "iCE40LP8K": "--lp8k",
        "iCE40HX1K": "--hx1k",
        "iCE40HX4K": "--hx8k",
        "iCE40HX8K": "--hx8k",
        "iCE40UP5K": "--up5k",
        "iCE40UP3K": "--up5k",
        "iCE5LP4K": "--u4k",
        "iCE5LP2K": "--u4k",
        "iCE5LP1K": "--u4k",
    }
    # Suffix appended to the lowercased package name when building the
    # nextpnr --package option; devices absent here default to no suffix.
    _nextpnr_package_options = {
        "iCE40LP4K": ":4k",
        "iCE40HX4K": ":4k",
        "iCE40UP3K": "",
        "iCE5LP2K": "",
        "iCE5LP1K": "",
    }
|
||||
|
||||
    # Executables that must be available for the open-source IceStorm flow.
    _icestorm_required_tools = [
        "yosys",
        "nextpnr-ice40",
        "icepack",
    ]
|
||||
_icestorm_file_templates = {
|
||||
**TemplatedPlatform.build_script_templates,
|
||||
"{{name}}.il": r"""
|
||||
# {{autogenerated}}
|
||||
{{emit_rtlil()}}
|
||||
""",
|
||||
"{{name}}.debug.v": r"""
|
||||
/* {{autogenerated}} */
|
||||
{{emit_debug_verilog()}}
|
||||
""",
|
||||
"{{name}}.ys": r"""
|
||||
# {{autogenerated}}
|
||||
{% for file in platform.iter_files(".v") -%}
|
||||
read_verilog {{get_override("read_verilog_opts")|options}} {{file}}
|
||||
{% endfor %}
|
||||
{% for file in platform.iter_files(".sv") -%}
|
||||
read_verilog -sv {{get_override("read_verilog_opts")|options}} {{file}}
|
||||
{% endfor %}
|
||||
{% for file in platform.iter_files(".il") -%}
|
||||
read_ilang {{file}}
|
||||
{% endfor %}
|
||||
read_ilang {{name}}.il
|
||||
delete w:$verilog_initial_trigger
|
||||
{{get_override("script_after_read")|default("# (script_after_read placeholder)")}}
|
||||
synth_ice40 {{get_override("synth_opts")|options}} -top {{name}}
|
||||
{{get_override("script_after_synth")|default("# (script_after_synth placeholder)")}}
|
||||
write_json {{name}}.json
|
||||
""",
|
||||
"{{name}}.pcf": r"""
|
||||
# {{autogenerated}}
|
||||
{% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%}
|
||||
set_io {{port_name}} {{pin_name}}
|
||||
{% endfor %}
|
||||
{% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%}
|
||||
set_frequency {{net_signal|hierarchy(".")}} {{frequency/1000000}}
|
||||
{% endfor%}
|
||||
{{get_override("add_constraints")|default("# (add_constraints placeholder)")}}
|
||||
""",
|
||||
}
|
||||
_icestorm_command_templates = [
|
||||
r"""
|
||||
{{invoke_tool("yosys")}}
|
||||
{{quiet("-q")}}
|
||||
{{get_override("yosys_opts")|options}}
|
||||
-l {{name}}.rpt
|
||||
{{name}}.ys
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("nextpnr-ice40")}}
|
||||
{{quiet("--quiet")}}
|
||||
{{get_override("nextpnr_opts")|options}}
|
||||
--log {{name}}.tim
|
||||
{{platform._nextpnr_device_options[platform.device]}}
|
||||
--package
|
||||
{{platform.package|lower}}{{platform._nextpnr_package_options[platform.device]|
|
||||
default("")}}
|
||||
--json {{name}}.json
|
||||
--pcf {{name}}.pcf
|
||||
--asc {{name}}.asc
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("icepack")}}
|
||||
{{verbose("-v")}}
|
||||
{{name}}.asc
|
||||
{{name}}.bin
|
||||
"""
|
||||
]
|
||||
|
||||
# iCECube2 templates
|
||||
|
||||
    # Executables required by both iCECube2 flows (LSE and Synplify).
    _icecube2_required_tools = [
        "synthesis",
        "synpwrap",
        "tclsh",
    ]
|
||||
_icecube2_file_templates = {
|
||||
**TemplatedPlatform.build_script_templates,
|
||||
"build_{{name}}.sh": r"""
|
||||
# {{autogenerated}}
|
||||
set -e{{verbose("x")}}
|
||||
if [ -n "${{platform._toolchain_env_var}}" ]; then
|
||||
# LSE environment
|
||||
export LD_LIBRARY_PATH=${{platform._toolchain_env_var}}/LSE/bin/lin64:$LD_LIBRARY_PATH
|
||||
export PATH=${{platform._toolchain_env_var}}/LSE/bin/lin64:$PATH
|
||||
export FOUNDRY=${{platform._toolchain_env_var}}/LSE
|
||||
# Synplify environment
|
||||
export LD_LIBRARY_PATH=${{platform._toolchain_env_var}}/sbt_backend/bin/linux/opt/synpwrap:$LD_LIBRARY_PATH
|
||||
export PATH=${{platform._toolchain_env_var}}/sbt_backend/bin/linux/opt/synpwrap:$PATH
|
||||
export SYNPLIFY_PATH=${{platform._toolchain_env_var}}/synpbase
|
||||
# Common environment
|
||||
export SBT_DIR=${{platform._toolchain_env_var}}/sbt_backend
|
||||
else
|
||||
echo "Variable ${{platform._toolchain_env_var}} must be set" >&2; exit 1
|
||||
fi
|
||||
{{emit_commands("sh")}}
|
||||
""",
|
||||
"{{name}}.v": r"""
|
||||
/* {{autogenerated}} */
|
||||
{{emit_verilog()}}
|
||||
""",
|
||||
"{{name}}.debug.v": r"""
|
||||
/* {{autogenerated}} */
|
||||
{{emit_debug_verilog()}}
|
||||
""",
|
||||
"{{name}}_lse.prj": r"""
|
||||
# {{autogenerated}}
|
||||
-a SBT{{platform.family}}
|
||||
-d {{platform.device}}
|
||||
-t {{platform.package}}
|
||||
{{get_override("lse_opts")|options|default("# (lse_opts placeholder)")}}
|
||||
{% for file in platform.iter_files(".v") -%}
|
||||
-ver {{file}}
|
||||
{% endfor %}
|
||||
-ver {{name}}.v
|
||||
-sdc {{name}}.sdc
|
||||
-top {{name}}
|
||||
-output_edif {{name}}.edf
|
||||
-logfile {{name}}_lse.log
|
||||
""",
|
||||
"{{name}}_syn.prj": r"""
|
||||
# {{autogenerated}}
|
||||
{% for file in platform.iter_files(".v", ".sv", ".vhd", ".vhdl") -%}
|
||||
add_file -verilog {{file|tcl_escape}}
|
||||
{% endfor %}
|
||||
add_file -verilog {{name}}.v
|
||||
add_file -constraint {{name}}.sdc
|
||||
{{get_override("script_after_add")|default("# (script_after_add placeholder)")}}
|
||||
impl -add {{name}}_design -type fpga
|
||||
set_option -technology SBT{{platform.family}}
|
||||
set_option -part {{platform.device}}
|
||||
set_option -package {{platform.package}}
|
||||
{{get_override("script_after_options")|default("# (script_after_options placeholder)")}}
|
||||
project -result_format edif
|
||||
project -result_file {{name}}.edf
|
||||
impl -active {{name}}_design
|
||||
project -run compile
|
||||
project -run map
|
||||
project -run fpga_mapper
|
||||
file copy -force -- {{name}}_design/{{name}}.edf {{name}}.edf
|
||||
""",
|
||||
"{{name}}.sdc": r"""
|
||||
# {{autogenerated}}
|
||||
{% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%}
|
||||
{% if port_signal is not none -%}
|
||||
create_clock -name {{port_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_ports {{port_signal.name|tcl_escape}}]
|
||||
{% else -%}
|
||||
create_clock -name {{net_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_nets {{net_signal|hierarchy("/")|tcl_escape}}]
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{{get_override("add_constraints")|default("# (add_constraints placeholder)")}}
|
||||
""",
|
||||
"{{name}}.tcl": r"""
|
||||
# {{autogenerated}}
|
||||
set device {{platform.device}}-{{platform.package}}
|
||||
set top_module {{name}}
|
||||
set proj_dir .
|
||||
set output_dir .
|
||||
set edif_file {{name}}
|
||||
set tool_options ":edifparser -y {{name}}.pcf"
|
||||
set sbt_root $::env(SBT_DIR)
|
||||
append sbt_tcl $sbt_root "/tcl/sbt_backend_synpl.tcl"
|
||||
source $sbt_tcl
|
||||
run_sbt_backend_auto $device $top_module $proj_dir $output_dir $tool_options $edif_file
|
||||
{{get_override("script_after_file")|default("# (script_after_file placeholder)")}}
|
||||
file copy -force -- sbt/outputs/bitmap/{{name}}_bitmap.bin {{name}}.bin
|
||||
exit
|
||||
""",
|
||||
"{{name}}.pcf": r"""
|
||||
# {{autogenerated}}
|
||||
{% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%}
|
||||
set_io {{port_name}} {{pin_name}}
|
||||
{% endfor %}
|
||||
""",
|
||||
}
|
||||
    # Commands run for the LSE-iCECube2 flow: LSE synthesis, then the SBT
    # backend driven by the generated Tcl script.
    _lse_icecube2_command_templates = [
        r"""synthesis -f {{name}}_lse.prj""",
        r"""tclsh {{name}}.tcl""",
    ]
    # Commands run for the Synplify-iCECube2 flow: Synplify via synpwrap,
    # then the same SBT backend Tcl script.
    _synplify_icecube2_command_templates = [
        r"""synpwrap -prj {{name}}_syn.prj -log {{name}}_syn.log""",
        r"""tclsh {{name}}.tcl""",
    ]
|
||||
|
||||
# Common logic
|
||||
|
||||
def __init__(self, *, toolchain="IceStorm"):
|
||||
super().__init__()
|
||||
|
||||
assert toolchain in ("IceStorm", "LSE-iCECube2", "Synplify-iCECube2")
|
||||
self.toolchain = toolchain
|
||||
|
||||
@property
|
||||
def family(self):
|
||||
if self.device.startswith("iCE40"):
|
||||
return "iCE40"
|
||||
if self.device.startswith("iCE5"):
|
||||
return "iCE5"
|
||||
assert False
|
||||
|
||||
@property
|
||||
def _toolchain_env_var(self):
|
||||
if self.toolchain == "IceStorm":
|
||||
return f"NMIGEN_ENV_{self.toolchain}"
|
||||
if self.toolchain in ("LSE-iCECube2", "Synplify-iCECube2"):
|
||||
return f"NMIGEN_ENV_iCECube2"
|
||||
assert False
|
||||
|
||||
@property
|
||||
def required_tools(self):
|
||||
if self.toolchain == "IceStorm":
|
||||
return self._icestorm_required_tools
|
||||
if self.toolchain in ("LSE-iCECube2", "Synplify-iCECube2"):
|
||||
return self._icecube2_required_tools
|
||||
assert False
|
||||
|
||||
@property
|
||||
def file_templates(self):
|
||||
if self.toolchain == "IceStorm":
|
||||
return self._icestorm_file_templates
|
||||
if self.toolchain in ("LSE-iCECube2", "Synplify-iCECube2"):
|
||||
return self._icecube2_file_templates
|
||||
assert False
|
||||
|
||||
@property
|
||||
def command_templates(self):
|
||||
if self.toolchain == "IceStorm":
|
||||
return self._icestorm_command_templates
|
||||
if self.toolchain == "LSE-iCECube2":
|
||||
return self._lse_icecube2_command_templates
|
||||
if self.toolchain == "Synplify-iCECube2":
|
||||
return self._synplify_icecube2_command_templates
|
||||
assert False
|
||||
|
||||
@property
|
||||
def default_clk_constraint(self):
|
||||
# Internal high-speed oscillator: 48 MHz / (2 ^ div)
|
||||
if self.default_clk == "SB_HFOSC":
|
||||
return Clock(48e6 / 2 ** self.hfosc_div)
|
||||
# Internal low-speed oscillator: 10 KHz
|
||||
elif self.default_clk == "SB_LFOSC":
|
||||
return Clock(10e3)
|
||||
# Otherwise, use the defined Clock resource.
|
||||
return super().default_clk_constraint
|
||||
|
||||
    def create_missing_domain(self, name):
        """Create the default "sync" domain, driven by the platform clock and
        held in reset by a power-on-reset timer (ORed with the board reset,
        if one is defined)."""
        # For unknown reasons (no errata was ever published, and no documentation mentions this
        # issue), iCE40 BRAMs read as zeroes for ~3 us after configuration and release of internal
        # global reset. Note that this is a *time-based* delay, generated purely by the internal
        # oscillator, which may not be observed nor influenced directly. For details, see links:
        # * https://github.com/cliffordwolf/icestorm/issues/76#issuecomment-289270411
        # * https://github.com/cliffordwolf/icotools/issues/2#issuecomment-299734673
        #
        # To handle this, it is necessary to have a global reset in any iCE40 design that may
        # potentially instantiate BRAMs, and assert this reset for >3 us after configuration.
        # (We add a margin of 5x to allow for PVT variation.) If the board includes a dedicated
        # reset line, this line is ORed with the power on reset.
        #
        # If an internal oscillator is selected as the default clock source, the power-on-reset
        # delay is increased to 100 us, since the oscillators are only stable after that long.
        #
        # The power-on reset timer counts up because the vendor tools do not support initialization
        # of flip-flops.
        if name == "sync" and self.default_clk is not None:
            m = Module()

            # Internal high-speed clock: 6 MHz, 12 MHz, 24 MHz, or 48 MHz depending on the divider.
            if self.default_clk == "SB_HFOSC":
                if not hasattr(self, "hfosc_div"):
                    raise ValueError("SB_HFOSC divider exponent (hfosc_div) must be an integer "
                                     "between 0 and 3")
                if not isinstance(self.hfosc_div, int) or self.hfosc_div < 0 or self.hfosc_div > 3:
                    raise ValueError("SB_HFOSC divider exponent (hfosc_div) must be an integer "
                                     "between 0 and 3, not {!r}"
                                     .format(self.hfosc_div))
                clk_i = Signal()
                m.submodules += Instance("SB_HFOSC",
                                         i_CLKHFEN=1,
                                         i_CLKHFPU=1,
                                         p_CLKHF_DIV="0b{0:02b}".format(self.hfosc_div),
                                         o_CLKHF=clk_i)
                delay = int(100e-6 * self.default_clk_frequency)
            # Internal low-speed clock: 10 KHz.
            elif self.default_clk == "SB_LFOSC":
                clk_i = Signal()
                m.submodules += Instance("SB_LFOSC",
                                         i_CLKLFEN=1,
                                         i_CLKLFPU=1,
                                         o_CLKLF=clk_i)
                delay = int(100e-6 * self.default_clk_frequency)
            # User-defined clock signal.
            else:
                clk_i = self.request(self.default_clk).i
                delay = int(15e-6 * self.default_clk_frequency)

            if self.default_rst is not None:
                rst_i = self.request(self.default_rst).i
            else:
                rst_i = Const(0)

            # Power-on-reset domain: runs the up-counting timer on the raw clock.
            m.domains += ClockDomain("por", reset_less=True, local=True)
            timer = Signal(range(delay))
            ready = Signal()
            m.d.comb += ClockSignal("por").eq(clk_i)
            with m.If(timer == delay):
                m.d.por += ready.eq(1)
            with m.Else():
                m.d.por += timer.eq(timer + 1)

            # Primary domain: same clock; reset released only once the timer expires.
            m.domains += ClockDomain("sync")
            m.d.comb += ClockSignal("sync").eq(clk_i)
            if self.default_rst is not None:
                m.submodules.reset_sync = ResetSynchronizer(~ready | rst_i, domain="sync")
            else:
                m.d.comb += ResetSignal("sync").eq(~ready)

            return m
|
||||
|
||||
def should_skip_port_component(self, port, attrs, component):
|
||||
# On iCE40, a differential input is placed by only instantiating an SB_IO primitive for
|
||||
# the pin with z=0, which is the non-inverting pin. The pinout unfortunately differs
|
||||
# between LP/HX and UP series:
|
||||
# * for LP/HX, z=0 is DPxxB (B is non-inverting, A is inverting)
|
||||
# * for UP, z=0 is IOB_xxA (A is non-inverting, B is inverting)
|
||||
if attrs.get("IO_STANDARD", "SB_LVCMOS") == "SB_LVDS_INPUT" and component == "n":
|
||||
return True
|
||||
return False
|
||||
|
||||
    def _get_io_buffer(self, m, pin, port, attrs, *, i_invert=False, o_invert=False,
                       invert_lut=False):
        """Instantiate SB_IO (or SB_GB_IO) buffers for *pin* in module *m*.

        ``invert_lut`` implements any requested inversion as an SB_LUT4
        pass-through — even on the non-inverted path — so both paths have
        matched delay (used for differential outputs).
        """
        def get_dff(clk, d, q):
            # Fabric register, emitted directly as a raw Yosys $dff cell.
            m.submodules += Instance("$dff",
                p_CLK_POLARITY=1,
                p_WIDTH=len(d),
                i_CLK=clk,
                i_D=d,
                o_Q=q)

        def get_ineg(y, invert):
            # Input inversion; LUT-based when invert_lut is set (LUT_INIT 0b01
            # is an inverter, 0b10 a buffer), otherwise a plain fabric ~.
            if invert_lut:
                a = Signal.like(y, name_suffix="_x{}".format(1 if invert else 0))
                for bit in range(len(y)):
                    m.submodules += Instance("SB_LUT4",
                        p_LUT_INIT=Const(0b01 if invert else 0b10, 16),
                        i_I0=a[bit],
                        i_I1=Const(0),
                        i_I2=Const(0),
                        i_I3=Const(0),
                        o_O=y[bit])
                return a
            elif invert:
                a = Signal.like(y, name_suffix="_n")
                m.d.comb += y.eq(~a)
                return a
            else:
                return y

        def get_oneg(a, invert):
            # Output inversion; see get_ineg for the LUT trick.
            if invert_lut:
                y = Signal.like(a, name_suffix="_x{}".format(1 if invert else 0))
                for bit in range(len(a)):
                    m.submodules += Instance("SB_LUT4",
                        p_LUT_INIT=Const(0b01 if invert else 0b10, 16),
                        i_I0=a[bit],
                        i_I1=Const(0),
                        i_I2=Const(0),
                        i_I3=Const(0),
                        o_O=y[bit])
                return y
            elif invert:
                y = Signal.like(a, name_suffix="_n")
                m.d.comb += y.eq(~a)
                return y
            else:
                return a

        # A truthy GLOBAL attribute selects an SB_GB_IO global buffer; the
        # attribute is consumed here rather than passed through to the cell.
        if "GLOBAL" in attrs:
            is_global_input = bool(attrs["GLOBAL"])
            del attrs["GLOBAL"]
        else:
            is_global_input = False
        assert not (is_global_input and i_invert)

        # Wrap the user-facing pin signals with inverters where requested.
        if "i" in pin.dir:
            if pin.xdr < 2:
                pin_i = get_ineg(pin.i, i_invert)
            elif pin.xdr == 2:
                pin_i0 = get_ineg(pin.i0, i_invert)
                pin_i1 = get_ineg(pin.i1, i_invert)
        if "o" in pin.dir:
            if pin.xdr < 2:
                pin_o = get_oneg(pin.o, o_invert)
            elif pin.xdr == 2:
                pin_o0 = get_oneg(pin.o0, o_invert)
                pin_o1 = get_oneg(pin.o1, o_invert)

        if "i" in pin.dir and pin.xdr == 2:
            i0_ff = Signal.like(pin_i0, name_suffix="_ff")
            i1_ff = Signal.like(pin_i1, name_suffix="_ff")
            get_dff(pin.i_clk, i0_ff, pin_i0)
            get_dff(pin.i_clk, i1_ff, pin_i1)
        if "o" in pin.dir and pin.xdr == 2:
            o1_ff = Signal.like(pin_o1, name_suffix="_ff")
            get_dff(pin.o_clk, pin_o1, o1_ff)

        for bit in range(len(port)):
            io_args = [
                ("io", "PACKAGE_PIN", port[bit]),
                *(("p", key, value) for key, value in attrs.items()),
            ]

            if "i" not in pin.dir:
                # If no input pin is requested, it is important to use a non-registered input pin
                # type, because an output-only pin would not have an input clock, and if its input
                # is configured as registered, this would prevent a co-located input-capable pin
                # from using an input clock.
                i_type = 0b01 # PIN_INPUT
            elif pin.xdr == 0:
                i_type = 0b01 # PIN_INPUT
            elif pin.xdr > 0:
                i_type = 0b00 # PIN_INPUT_REGISTERED aka PIN_INPUT_DDR
            if "o" not in pin.dir:
                o_type = 0b0000 # PIN_NO_OUTPUT
            elif pin.xdr == 0 and pin.dir == "o":
                o_type = 0b0110 # PIN_OUTPUT
            elif pin.xdr == 0:
                o_type = 0b1010 # PIN_OUTPUT_TRISTATE
            elif pin.xdr == 1 and pin.dir == "o":
                o_type = 0b0101 # PIN_OUTPUT_REGISTERED
            elif pin.xdr == 1:
                o_type = 0b1101 # PIN_OUTPUT_REGISTERED_ENABLE_REGISTERED
            elif pin.xdr == 2 and pin.dir == "o":
                o_type = 0b0100 # PIN_OUTPUT_DDR
            elif pin.xdr == 2:
                o_type = 0b1100 # PIN_OUTPUT_DDR_ENABLE_REGISTERED
            io_args.append(("p", "PIN_TYPE", C((o_type << 2) | i_type, 6)))

            if hasattr(pin, "i_clk"):
                io_args.append(("i", "INPUT_CLK", pin.i_clk))
            if hasattr(pin, "o_clk"):
                io_args.append(("i", "OUTPUT_CLK", pin.o_clk))

            if "i" in pin.dir:
                if pin.xdr == 0 and is_global_input:
                    io_args.append(("o", "GLOBAL_BUFFER_OUTPUT", pin.i[bit]))
                elif pin.xdr < 2:
                    io_args.append(("o", "D_IN_0", pin_i[bit]))
                elif pin.xdr == 2:
                    # Re-register both inputs before they enter fabric. This increases hold time
                    # to an entire cycle, and adds one cycle of latency.
                    io_args.append(("o", "D_IN_0", i0_ff[bit]))
                    io_args.append(("o", "D_IN_1", i1_ff[bit]))
            if "o" in pin.dir:
                if pin.xdr < 2:
                    io_args.append(("i", "D_OUT_0", pin_o[bit]))
                elif pin.xdr == 2:
                    # Re-register negedge output after it leaves fabric. This increases setup time
                    # to an entire cycle, and doesn't add latency.
                    io_args.append(("i", "D_OUT_0", pin_o0[bit]))
                    io_args.append(("i", "D_OUT_1", o1_ff[bit]))

            if pin.dir in ("oe", "io"):
                io_args.append(("i", "OUTPUT_ENABLE", pin.oe))

            if is_global_input:
                m.submodules["{}_{}".format(pin.name, bit)] = Instance("SB_GB_IO", *io_args)
            else:
                m.submodules["{}_{}".format(pin.name, bit)] = Instance("SB_IO", *io_args)
|
||||
|
||||
def get_input(self, pin, port, attrs, invert):
|
||||
self._check_feature("single-ended input", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2), valid_attrs=True)
|
||||
m = Module()
|
||||
self._get_io_buffer(m, pin, port.io, attrs, i_invert=invert)
|
||||
return m
|
||||
|
||||
def get_output(self, pin, port, attrs, invert):
|
||||
self._check_feature("single-ended output", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2), valid_attrs=True)
|
||||
m = Module()
|
||||
self._get_io_buffer(m, pin, port.io, attrs, o_invert=invert)
|
||||
return m
|
||||
|
||||
def get_tristate(self, pin, port, attrs, invert):
|
||||
self._check_feature("single-ended tristate", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2), valid_attrs=True)
|
||||
m = Module()
|
||||
self._get_io_buffer(m, pin, port.io, attrs, o_invert=invert)
|
||||
return m
|
||||
|
||||
def get_input_output(self, pin, port, attrs, invert):
|
||||
self._check_feature("single-ended input/output", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2), valid_attrs=True)
|
||||
m = Module()
|
||||
self._get_io_buffer(m, pin, port.io, attrs, i_invert=invert, o_invert=invert)
|
||||
return m
|
||||
|
||||
def get_diff_input(self, pin, port, attrs, invert):
|
||||
self._check_feature("differential input", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2), valid_attrs=True)
|
||||
m = Module()
|
||||
# See comment in should_skip_port_component above.
|
||||
self._get_io_buffer(m, pin, port.p, attrs, i_invert=invert)
|
||||
return m
|
||||
|
||||
def get_diff_output(self, pin, port, attrs, invert):
|
||||
self._check_feature("differential output", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2), valid_attrs=True)
|
||||
m = Module()
|
||||
# Note that the non-inverting output pin is not driven the same way as a regular
|
||||
# output pin. The inverter introduces a delay, so for a non-inverting output pin,
|
||||
# an identical delay is introduced by instantiating a LUT. This makes the waveform
|
||||
# perfectly symmetric in the xdr=0 case.
|
||||
self._get_io_buffer(m, pin, port.p, attrs, o_invert= invert, invert_lut=True)
|
||||
self._get_io_buffer(m, pin, port.n, attrs, o_invert=not invert, invert_lut=True)
|
||||
return m
|
||||
|
||||
# Tristate bidirectional buffers are not supported on iCE40 because it requires external
|
||||
# termination, which is different for differential pins configured as inputs and outputs.
|
||||
|
||||
# CDC primitives are not currently specialized for iCE40. It is not known if iCECube2 supports
|
||||
# the necessary attributes; nextpnr-ice40 does not.
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.vendor.lattice_ice40, use amaranth.vendor.lattice_ice40",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
14
nmigen/vendor/lattice_machxo2.py
vendored
14
nmigen/vendor/lattice_machxo2.py
vendored
|
|
@ -1,11 +1,7 @@
|
|||
from amaranth.vendor.lattice_machxo2 import *
|
||||
from amaranth.vendor.lattice_machxo2 import __all__
|
||||
|
||||
|
||||
import warnings
|
||||
|
||||
from .lattice_machxo_2_3l import LatticeMachXO2Platform
|
||||
|
||||
|
||||
__all__ = ["LatticeMachXO2Platform"]
|
||||
|
||||
|
||||
# TODO(nmigen-0.4): remove
|
||||
warnings.warn("instead of nmigen.vendor.lattice_machxo2, use nmigen.vendor.lattice_machxo_2_3l",
|
||||
warnings.warn("instead of nmigen.vendor.lattice_machxo2, use amaranth.vendor.lattice_machxo2",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
424
nmigen/vendor/lattice_machxo_2_3l.py
vendored
424
nmigen/vendor/lattice_machxo_2_3l.py
vendored
|
|
@ -1,421 +1,7 @@
|
|||
from abc import abstractproperty
|
||||
|
||||
from ..hdl import *
|
||||
from ..build import *
|
||||
from amaranth.vendor.lattice_machxo_2_3l import *
|
||||
from amaranth.vendor.lattice_machxo_2_3l import __all__
|
||||
|
||||
|
||||
__all__ = ["LatticeMachXO2Platform", "LatticeMachXO3LPlatform"]
|
||||
|
||||
|
||||
# MachXO2 and MachXO3L primitives are the same. Handle both using
|
||||
# one class and expose user-aliases for convenience.
|
||||
class LatticeMachXO2Or3LPlatform(TemplatedPlatform):
|
||||
"""
|
||||
Required tools:
|
||||
* ``pnmainc``
|
||||
* ``ddtcmd``
|
||||
|
||||
The environment is populated by running the script specified in the environment variable
|
||||
``NMIGEN_ENV_Diamond``, if present. On Linux, diamond_env as provided by Diamond
|
||||
itself is a good candidate. On Windows, the following script (named ``diamond_env.bat``,
|
||||
for instance) is known to work::
|
||||
|
||||
@echo off
|
||||
set PATH=C:\\lscc\\diamond\\%DIAMOND_VERSION%\\bin\\nt64;%PATH%
|
||||
|
||||
Available overrides:
|
||||
* ``script_project``: inserts commands before ``prj_project save`` in Tcl script.
|
||||
* ``script_after_export``: inserts commands after ``prj_run Export`` in Tcl script.
|
||||
* ``add_preferences``: inserts commands at the end of the LPF file.
|
||||
* ``add_constraints``: inserts commands at the end of the XDC file.
|
||||
|
||||
Build products:
|
||||
* ``{{name}}_impl/{{name}}_impl.htm``: consolidated log.
|
||||
* ``{{name}}.jed``: JEDEC fuse file.
|
||||
* ``{{name}}.bit``: binary bitstream.
|
||||
* ``{{name}}.svf``: JTAG programming vector for FLASH programming.
|
||||
* ``{{name}}_flash.svf``: JTAG programming vector for FLASH programming.
|
||||
* ``{{name}}_sram.svf``: JTAG programming vector for SRAM programming.
|
||||
"""
|
||||
|
||||
toolchain = "Diamond"
|
||||
|
||||
device = abstractproperty()
|
||||
package = abstractproperty()
|
||||
speed = abstractproperty()
|
||||
grade = "C" # [C]ommercial, [I]ndustrial
|
||||
|
||||
required_tools = [
|
||||
"pnmainc",
|
||||
"ddtcmd"
|
||||
]
|
||||
file_templates = {
|
||||
**TemplatedPlatform.build_script_templates,
|
||||
"build_{{name}}.sh": r"""
|
||||
# {{autogenerated}}
|
||||
set -e{{verbose("x")}}
|
||||
if [ -z "$BASH" ] ; then exec /bin/bash "$0" "$@"; fi
|
||||
if [ -n "${{platform._toolchain_env_var}}" ]; then
|
||||
bindir=$(dirname "${{platform._toolchain_env_var}}")
|
||||
. "${{platform._toolchain_env_var}}"
|
||||
fi
|
||||
{{emit_commands("sh")}}
|
||||
""",
|
||||
"{{name}}.v": r"""
|
||||
/* {{autogenerated}} */
|
||||
{{emit_verilog()}}
|
||||
""",
|
||||
"{{name}}.debug.v": r"""
|
||||
/* {{autogenerated}} */
|
||||
{{emit_debug_verilog()}}
|
||||
""",
|
||||
"{{name}}.tcl": r"""
|
||||
prj_project new -name {{name}} -impl impl -impl_dir {{name}}_impl \
|
||||
-dev {{platform.device}}-{{platform.speed}}{{platform.package}}{{platform.grade}} \
|
||||
-lpf {{name}}.lpf \
|
||||
-synthesis synplify
|
||||
{% for file in platform.iter_files(".v", ".sv", ".vhd", ".vhdl") -%}
|
||||
prj_src add {{file|tcl_escape}}
|
||||
{% endfor %}
|
||||
prj_src add {{name}}.v
|
||||
prj_impl option top {{name}}
|
||||
prj_src add {{name}}.sdc
|
||||
{{get_override("script_project")|default("# (script_project placeholder)")}}
|
||||
prj_project save
|
||||
prj_run Synthesis -impl impl
|
||||
prj_run Translate -impl impl
|
||||
prj_run Map -impl impl
|
||||
prj_run PAR -impl impl
|
||||
prj_run Export -impl impl -task Bitgen
|
||||
prj_run Export -impl impl -task Jedecgen
|
||||
{{get_override("script_after_export")|default("# (script_after_export placeholder)")}}
|
||||
""",
|
||||
"{{name}}.lpf": r"""
|
||||
# {{autogenerated}}
|
||||
BLOCK ASYNCPATHS;
|
||||
BLOCK RESETPATHS;
|
||||
{% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%}
|
||||
LOCATE COMP "{{port_name}}" SITE "{{pin_name}}";
|
||||
{% if attrs -%}
|
||||
IOBUF PORT "{{port_name}}"
|
||||
{%- for key, value in attrs.items() %} {{key}}={{value}}{% endfor %};
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{{get_override("add_preferences")|default("# (add_preferences placeholder)")}}
|
||||
""",
|
||||
"{{name}}.sdc": r"""
|
||||
{% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%}
|
||||
{% if port_signal is not none -%}
|
||||
create_clock -name {{port_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_ports {{port_signal.name|tcl_escape}}]
|
||||
{% else -%}
|
||||
create_clock -name {{net_signal.name|tcl_escape}} -period {{1000000000/frequency}} [get_nets {{net_signal|hierarchy("/")|tcl_escape}}]
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{{get_override("add_constraints")|default("# (add_constraints placeholder)")}}
|
||||
""",
|
||||
}
|
||||
command_templates = [
|
||||
# These don't have any usable command-line option overrides.
|
||||
r"""
|
||||
{{invoke_tool("pnmainc")}}
|
||||
{{name}}.tcl
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("ddtcmd")}}
|
||||
-oft -bit
|
||||
-if {{name}}_impl/{{name}}_impl.bit -of {{name}}.bit
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("ddtcmd")}}
|
||||
-oft -jed
|
||||
-dev {{platform.device}}-{{platform.speed}}{{platform.package}}{{platform.grade}}
|
||||
-if {{name}}_impl/{{name}}_impl.jed -of {{name}}.jed
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("ddtcmd")}}
|
||||
-oft -svfsingle -revd -op "FLASH Erase,Program,Verify"
|
||||
-if {{name}}_impl/{{name}}_impl.jed -of {{name}}_flash.svf
|
||||
""",
|
||||
# TODO(nmigen-0.4): remove
|
||||
r"""
|
||||
{% if syntax == "bat" -%}
|
||||
copy {{name}}_flash.svf {{name}}.svf
|
||||
{% else -%}
|
||||
cp {{name}}_flash.svf {{name}}.svf
|
||||
{% endif %}
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("ddtcmd")}}
|
||||
-oft -svfsingle -revd -op "SRAM Fast Program"
|
||||
-if {{name}}_impl/{{name}}_impl.bit -of {{name}}_sram.svf
|
||||
""",
|
||||
]
|
||||
|
||||
def create_missing_domain(self, name):
|
||||
# Lattice MachXO2/MachXO3L devices have two global set/reset signals: PUR, which is driven at
|
||||
# startup by the configuration logic and unconditionally resets every storage element,
|
||||
# and GSR, which is driven by user logic and each storage element may be configured as
|
||||
# affected or unaffected by GSR. PUR is purely asynchronous, so even though it is
|
||||
# a low-skew global network, its deassertion may violate a setup/hold constraint with
|
||||
# relation to a user clock. To avoid this, a GSR/SGSR instance should be driven
|
||||
# synchronized to user clock.
|
||||
if name == "sync" and self.default_clk is not None:
|
||||
clk_i = self.request(self.default_clk).i
|
||||
if self.default_rst is not None:
|
||||
rst_i = self.request(self.default_rst).i
|
||||
else:
|
||||
rst_i = Const(0)
|
||||
|
||||
gsr0 = Signal()
|
||||
gsr1 = Signal()
|
||||
m = Module()
|
||||
# There is no end-of-startup signal on MachXO2/MachXO3L, but PUR is released after IOB
|
||||
# enable, so a simple reset synchronizer (with PUR as the asynchronous reset) does the job.
|
||||
m.submodules += [
|
||||
Instance("FD1S3AX", p_GSR="DISABLED", i_CK=clk_i, i_D=~rst_i, o_Q=gsr0),
|
||||
Instance("FD1S3AX", p_GSR="DISABLED", i_CK=clk_i, i_D=gsr0, o_Q=gsr1),
|
||||
# Although we already synchronize the reset input to user clock, SGSR has dedicated
|
||||
# clock routing to the center of the FPGA; use that just in case it turns out to be
|
||||
# more reliable. (None of this is documented.)
|
||||
Instance("SGSR", i_CLK=clk_i, i_GSR=gsr1),
|
||||
]
|
||||
# GSR implicitly connects to every appropriate storage element. As such, the sync
|
||||
# domain is reset-less; domains driven by other clocks would need to have dedicated
|
||||
# reset circuitry or otherwise meet setup/hold constraints on their own.
|
||||
m.domains += ClockDomain("sync", reset_less=True)
|
||||
m.d.comb += ClockSignal("sync").eq(clk_i)
|
||||
return m
|
||||
|
||||
_single_ended_io_types = [
|
||||
"PCI33", "LVTTL33", "LVCMOS33", "LVCMOS25", "LVCMOS18", "LVCMOS15", "LVCMOS12",
|
||||
"LVCMOS25R33", "LVCMOS18R33", "LVCMOS18R25", "LVCMOS15R33", "LVCMOS15R25", "LVCMOS12R33",
|
||||
"LVCMOS12R25", "LVCMOS10R33", "LVCMOS10R25", "SSTL25_I", "SSTL25_II", "SSTL18_I",
|
||||
"SSTL18_II", "HSTL18_I", "HSTL18_II",
|
||||
]
|
||||
_differential_io_types = [
|
||||
"LVDS25", "LVDS25E", "RSDS25", "RSDS25E", "BLVDS25", "BLVDS25E", "MLVDS25", "MLVDS25E",
|
||||
"LVPECL33", "LVPECL33E", "SSTL25D_I", "SSTL25D_II", "SSTL18D_I", "SSTL18D_II",
|
||||
"HSTL18D_I", "HSTL18D_II", "LVTTL33D", "LVCMOS33D", "LVCMOS25D", "LVCMOS18D", "LVCMOS15D",
|
||||
"LVCMOS12D", "MIPI",
|
||||
]
|
||||
|
||||
def should_skip_port_component(self, port, attrs, component):
|
||||
# On ECP5, a differential IO is placed by only instantiating an IO buffer primitive at
|
||||
# the PIOA or PIOC location, which is always the non-inverting pin.
|
||||
if attrs.get("IO_TYPE", "LVCMOS25") in self._differential_io_types and component == "n":
|
||||
return True
|
||||
return False
|
||||
|
||||
def _get_xdr_buffer(self, m, pin, *, i_invert=False, o_invert=False):
|
||||
def get_ireg(clk, d, q):
|
||||
for bit in range(len(q)):
|
||||
m.submodules += Instance("IFS1P3DX",
|
||||
i_SCLK=clk,
|
||||
i_SP=Const(1),
|
||||
i_CD=Const(0),
|
||||
i_D=d[bit],
|
||||
o_Q=q[bit]
|
||||
)
|
||||
|
||||
def get_oreg(clk, d, q):
|
||||
for bit in range(len(q)):
|
||||
m.submodules += Instance("OFS1P3DX",
|
||||
i_SCLK=clk,
|
||||
i_SP=Const(1),
|
||||
i_CD=Const(0),
|
||||
i_D=d[bit],
|
||||
o_Q=q[bit]
|
||||
)
|
||||
|
||||
def get_iddr(sclk, d, q0, q1):
|
||||
for bit in range(len(d)):
|
||||
m.submodules += Instance("IDDRXE",
|
||||
i_SCLK=sclk,
|
||||
i_RST=Const(0),
|
||||
i_D=d[bit],
|
||||
o_Q0=q0[bit], o_Q1=q1[bit]
|
||||
)
|
||||
|
||||
def get_oddr(sclk, d0, d1, q):
|
||||
for bit in range(len(q)):
|
||||
m.submodules += Instance("ODDRXE",
|
||||
i_SCLK=sclk,
|
||||
i_RST=Const(0),
|
||||
i_D0=d0[bit], i_D1=d1[bit],
|
||||
o_Q=q[bit]
|
||||
)
|
||||
|
||||
def get_ineg(z, invert):
|
||||
if invert:
|
||||
a = Signal.like(z, name_suffix="_n")
|
||||
m.d.comb += z.eq(~a)
|
||||
return a
|
||||
else:
|
||||
return z
|
||||
|
||||
def get_oneg(a, invert):
|
||||
if invert:
|
||||
z = Signal.like(a, name_suffix="_n")
|
||||
m.d.comb += z.eq(~a)
|
||||
return z
|
||||
else:
|
||||
return a
|
||||
|
||||
if "i" in pin.dir:
|
||||
if pin.xdr < 2:
|
||||
pin_i = get_ineg(pin.i, i_invert)
|
||||
elif pin.xdr == 2:
|
||||
pin_i0 = get_ineg(pin.i0, i_invert)
|
||||
pin_i1 = get_ineg(pin.i1, i_invert)
|
||||
if "o" in pin.dir:
|
||||
if pin.xdr < 2:
|
||||
pin_o = get_oneg(pin.o, o_invert)
|
||||
elif pin.xdr == 2:
|
||||
pin_o0 = get_oneg(pin.o0, o_invert)
|
||||
pin_o1 = get_oneg(pin.o1, o_invert)
|
||||
|
||||
i = o = t = None
|
||||
if "i" in pin.dir:
|
||||
i = Signal(pin.width, name="{}_xdr_i".format(pin.name))
|
||||
if "o" in pin.dir:
|
||||
o = Signal(pin.width, name="{}_xdr_o".format(pin.name))
|
||||
if pin.dir in ("oe", "io"):
|
||||
t = Signal(1, name="{}_xdr_t".format(pin.name))
|
||||
|
||||
if pin.xdr == 0:
|
||||
if "i" in pin.dir:
|
||||
i = pin_i
|
||||
if "o" in pin.dir:
|
||||
o = pin_o
|
||||
if pin.dir in ("oe", "io"):
|
||||
t = ~pin.oe
|
||||
elif pin.xdr == 1:
|
||||
# Note that currently nextpnr will not pack an FF (*FS1P3DX) into the PIO.
|
||||
if "i" in pin.dir:
|
||||
get_ireg(pin.i_clk, i, pin_i)
|
||||
if "o" in pin.dir:
|
||||
get_oreg(pin.o_clk, pin_o, o)
|
||||
if pin.dir in ("oe", "io"):
|
||||
get_oreg(pin.o_clk, ~pin.oe, t)
|
||||
elif pin.xdr == 2:
|
||||
if "i" in pin.dir:
|
||||
get_iddr(pin.i_clk, i, pin_i0, pin_i1)
|
||||
if "o" in pin.dir:
|
||||
get_oddr(pin.o_clk, pin_o0, pin_o1, o)
|
||||
if pin.dir in ("oe", "io"):
|
||||
# It looks like Diamond will not pack an OREG as a tristate register in a DDR PIO.
|
||||
# It is not clear what is the recommended set of primitives for this task.
|
||||
# Similarly, nextpnr will not pack anything as a tristate register in a DDR PIO.
|
||||
get_oreg(pin.o_clk, ~pin.oe, t)
|
||||
else:
|
||||
assert False
|
||||
|
||||
return (i, o, t)
|
||||
|
||||
def get_input(self, pin, port, attrs, invert):
|
||||
self._check_feature("single-ended input", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2), valid_attrs=True)
|
||||
m = Module()
|
||||
i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert)
|
||||
for bit in range(len(port)):
|
||||
m.submodules["{}_{}".format(pin.name, bit)] = Instance("IB",
|
||||
i_I=port.io[bit],
|
||||
o_O=i[bit]
|
||||
)
|
||||
return m
|
||||
|
||||
def get_output(self, pin, port, attrs, invert):
|
||||
self._check_feature("single-ended output", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2), valid_attrs=True)
|
||||
m = Module()
|
||||
i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert)
|
||||
for bit in range(len(port)):
|
||||
m.submodules["{}_{}".format(pin.name, bit)] = Instance("OB",
|
||||
i_I=o[bit],
|
||||
o_O=port.io[bit]
|
||||
)
|
||||
return m
|
||||
|
||||
def get_tristate(self, pin, port, attrs, invert):
|
||||
self._check_feature("single-ended tristate", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2), valid_attrs=True)
|
||||
m = Module()
|
||||
i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert)
|
||||
for bit in range(len(port)):
|
||||
m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBZ",
|
||||
i_T=t,
|
||||
i_I=o[bit],
|
||||
o_O=port.io[bit]
|
||||
)
|
||||
return m
|
||||
|
||||
def get_input_output(self, pin, port, attrs, invert):
|
||||
self._check_feature("single-ended input/output", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2), valid_attrs=True)
|
||||
m = Module()
|
||||
i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert, o_invert=invert)
|
||||
for bit in range(len(port)):
|
||||
m.submodules["{}_{}".format(pin.name, bit)] = Instance("BB",
|
||||
i_T=t,
|
||||
i_I=o[bit],
|
||||
o_O=i[bit],
|
||||
io_B=port.io[bit]
|
||||
)
|
||||
return m
|
||||
|
||||
def get_diff_input(self, pin, port, attrs, invert):
|
||||
self._check_feature("differential input", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2), valid_attrs=True)
|
||||
m = Module()
|
||||
i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert)
|
||||
for bit in range(pin.width):
|
||||
m.submodules["{}_{}".format(pin.name, bit)] = Instance("IB",
|
||||
i_I=port.p[bit],
|
||||
o_O=i[bit]
|
||||
)
|
||||
return m
|
||||
|
||||
def get_diff_output(self, pin, port, attrs, invert):
|
||||
self._check_feature("differential output", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2), valid_attrs=True)
|
||||
m = Module()
|
||||
i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert)
|
||||
for bit in range(pin.width):
|
||||
m.submodules["{}_{}".format(pin.name, bit)] = Instance("OB",
|
||||
i_I=o[bit],
|
||||
o_O=port.p[bit],
|
||||
)
|
||||
return m
|
||||
|
||||
def get_diff_tristate(self, pin, port, attrs, invert):
|
||||
self._check_feature("differential tristate", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2), valid_attrs=True)
|
||||
m = Module()
|
||||
i, o, t = self._get_xdr_buffer(m, pin, o_invert=invert)
|
||||
for bit in range(pin.width):
|
||||
m.submodules["{}_{}".format(pin.name, bit)] = Instance("OBZ",
|
||||
i_T=t,
|
||||
i_I=o[bit],
|
||||
o_O=port.p[bit],
|
||||
)
|
||||
return m
|
||||
|
||||
def get_diff_input_output(self, pin, port, attrs, invert):
|
||||
self._check_feature("differential input/output", pin, attrs,
|
||||
valid_xdrs=(0, 1, 2), valid_attrs=True)
|
||||
m = Module()
|
||||
i, o, t = self._get_xdr_buffer(m, pin, i_invert=invert, o_invert=invert)
|
||||
for bit in range(pin.width):
|
||||
m.submodules["{}_{}".format(pin.name, bit)] = Instance("BB",
|
||||
i_T=t,
|
||||
i_I=o[bit],
|
||||
o_O=i[bit],
|
||||
io_B=port.p[bit],
|
||||
)
|
||||
return m
|
||||
|
||||
# CDC primitives are not currently specialized for MachXO2/MachXO3L.
|
||||
|
||||
|
||||
LatticeMachXO2Platform = LatticeMachXO2Or3LPlatform
|
||||
LatticeMachXO3LPlatform = LatticeMachXO2Or3LPlatform
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.vendor.lattice_machxo_2_3l, use amaranth.vendor.lattice_machxo_2_3l",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
188
nmigen/vendor/quicklogic.py
vendored
188
nmigen/vendor/quicklogic.py
vendored
|
|
@ -1,185 +1,7 @@
|
|||
from abc import abstractproperty
|
||||
|
||||
from ..hdl import *
|
||||
from ..lib.cdc import ResetSynchronizer
|
||||
from ..build import *
|
||||
from amaranth.vendor.quicklogic import *
|
||||
from amaranth.vendor.quicklogic import __all__
|
||||
|
||||
|
||||
__all__ = ["QuicklogicPlatform"]
|
||||
|
||||
|
||||
class QuicklogicPlatform(TemplatedPlatform):
|
||||
"""
|
||||
Symbiflow toolchain
|
||||
-------------------
|
||||
|
||||
Required tools:
|
||||
* ``symbiflow_synth``
|
||||
* ``symbiflow_pack``
|
||||
* ``symbiflow_place``
|
||||
* ``symbiflow_route``
|
||||
* ``symbiflow_write_fasm``
|
||||
* ``symbiflow_write_bitstream``
|
||||
|
||||
The environment is populated by running the script specified in the environment variable
|
||||
``NMIGEN_ENV_QLSymbiflow``, if present.
|
||||
|
||||
Available overrides:
|
||||
* ``add_constraints``: inserts commands in XDC file.
|
||||
"""
|
||||
|
||||
device = abstractproperty()
|
||||
package = abstractproperty()
|
||||
|
||||
# Since the QuickLogic version of SymbiFlow toolchain is not upstreamed yet
|
||||
# we should distinguish the QuickLogic version from mainline one.
|
||||
# QuickLogic toolchain: https://github.com/QuickLogic-Corp/quicklogic-fpga-toolchain/releases
|
||||
toolchain = "QLSymbiflow"
|
||||
|
||||
required_tools = [
|
||||
"symbiflow_synth",
|
||||
"symbiflow_pack",
|
||||
"symbiflow_place",
|
||||
"symbiflow_route",
|
||||
"symbiflow_write_fasm",
|
||||
"symbiflow_write_bitstream",
|
||||
"symbiflow_write_openocd",
|
||||
]
|
||||
file_templates = {
|
||||
**TemplatedPlatform.build_script_templates,
|
||||
"{{name}}.v": r"""
|
||||
/* {{autogenerated}} */
|
||||
{{emit_verilog()}}
|
||||
""",
|
||||
"{{name}}.debug.v": r"""
|
||||
/* {{autogenerated}} */
|
||||
{{emit_debug_verilog()}}
|
||||
""",
|
||||
"{{name}}.pcf": r"""
|
||||
# {{autogenerated}}
|
||||
{% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%}
|
||||
set_io {{port_name}} {{pin_name}}
|
||||
{% endfor %}
|
||||
""",
|
||||
"{{name}}.xdc": r"""
|
||||
# {{autogenerated}}
|
||||
{% for port_name, pin_name, attrs in platform.iter_port_constraints_bits() -%}
|
||||
{% for attr_name, attr_value in attrs.items() -%}
|
||||
set_property {{attr_name}} {{attr_value}} [get_ports {{port_name|tcl_escape}} }]
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
{{get_override("add_constraints")|default("# (add_constraints placeholder)")}}
|
||||
""",
|
||||
"{{name}}.sdc": r"""
|
||||
# {{autogenerated}}
|
||||
{% for net_signal, port_signal, frequency in platform.iter_clock_constraints() -%}
|
||||
{% if port_signal is not none -%}
|
||||
create_clock -period {{100000000/frequency}} {{port_signal.name|ascii_escape}}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
"""
|
||||
}
|
||||
command_templates = [
|
||||
r"""
|
||||
{{invoke_tool("symbiflow_synth")}}
|
||||
-t {{name}}
|
||||
-v {% for file in platform.iter_files(".v", ".sv", ".vhd", ".vhdl") -%} {{file}} {% endfor %} {{name}}.v
|
||||
-d {{platform.device}}
|
||||
-p {{name}}.pcf
|
||||
-P {{platform.package}}
|
||||
-x {{name}}.xdc
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("symbiflow_pack")}}
|
||||
-e {{name}}.eblif
|
||||
-d {{platform.device}}
|
||||
-s {{name}}.sdc
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("symbiflow_place")}}
|
||||
-e {{name}}.eblif
|
||||
-d {{platform.device}}
|
||||
-p {{name}}.pcf
|
||||
-n {{name}}.net
|
||||
-P {{platform.package}}
|
||||
-s {{name}}.sdc
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("symbiflow_route")}}
|
||||
-e {{name}}.eblif
|
||||
-d {{platform.device}}
|
||||
-s {{name}}.sdc
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("symbiflow_write_fasm")}}
|
||||
-e {{name}}.eblif
|
||||
-d {{platform.device}}
|
||||
-s {{name}}.sdc
|
||||
""",
|
||||
r"""
|
||||
{{invoke_tool("symbiflow_write_bitstream")}}
|
||||
-f {{name}}.fasm
|
||||
-d {{platform.device}}
|
||||
-P {{platform.package}}
|
||||
-b {{name}}.bit
|
||||
""",
|
||||
# This should be `invoke_tool("symbiflow_write_openocd")`, but isn't because of a bug in
|
||||
# the QLSymbiflow v1.3.0 toolchain release.
|
||||
r"""
|
||||
python3 -m quicklogic_fasm.bitstream_to_openocd
|
||||
{{name}}.bit
|
||||
{{name}}.openocd
|
||||
--osc-freq {{platform.osc_freq}}
|
||||
--fpga-clk-divider {{platform.osc_div}}
|
||||
""",
|
||||
]
|
||||
|
||||
# Common logic
|
||||
|
||||
@property
|
||||
def default_clk_constraint(self):
|
||||
if self.default_clk == "sys_clk0":
|
||||
return Clock(self.osc_freq / self.osc_div)
|
||||
return super().default_clk_constraint
|
||||
|
||||
def add_clock_constraint(self, clock, frequency):
|
||||
super().add_clock_constraint(clock, frequency)
|
||||
clock.attrs["keep"] = "TRUE"
|
||||
|
||||
def create_missing_domain(self, name):
|
||||
if name == "sync" and self.default_clk is not None:
|
||||
m = Module()
|
||||
if self.default_clk == "sys_clk0":
|
||||
if not hasattr(self, "osc_div"):
|
||||
raise ValueError("OSC divider (osc_div) must be an integer between 2 "
|
||||
"and 512")
|
||||
if not isinstance(self.osc_div, int) or self.osc_div < 2 or self.osc_div > 512:
|
||||
raise ValueError("OSC divider (osc_div) must be an integer between 2 "
|
||||
"and 512, not {!r}"
|
||||
.format(self.osc_div))
|
||||
if not hasattr(self, "osc_freq"):
|
||||
raise ValueError("OSC frequency (osc_freq) must be an integer between 2100000 "
|
||||
"and 80000000")
|
||||
if not isinstance(self.osc_freq, int) or self.osc_freq < 2100000 or self.osc_freq > 80000000:
|
||||
raise ValueError("OSC frequency (osc_freq) must be an integer between 2100000 "
|
||||
"and 80000000, not {!r}"
|
||||
.format(self.osc_freq))
|
||||
clk_i = Signal()
|
||||
sys_clk0 = Signal()
|
||||
m.submodules += Instance("qlal4s3b_cell_macro",
|
||||
o_Sys_Clk0=sys_clk0)
|
||||
m.submodules += Instance("gclkbuff",
|
||||
o_A=sys_clk0,
|
||||
o_Z=clk_i)
|
||||
else:
|
||||
clk_i = self.request(self.default_clk).i
|
||||
|
||||
if self.default_rst is not None:
|
||||
rst_i = self.request(self.default_rst).i
|
||||
else:
|
||||
rst_i = Const(0)
|
||||
|
||||
m.domains += ClockDomain("sync")
|
||||
m.d.comb += ClockSignal("sync").eq(clk_i)
|
||||
m.submodules.reset_sync = ResetSynchronizer(rst_i, domain="sync")
|
||||
return m
|
||||
import warnings
|
||||
warnings.warn("instead of nmigen.vendor.quicklogic, use amaranth.vendor.quicklogic",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
1063
nmigen/vendor/xilinx.py
vendored
1063
nmigen/vendor/xilinx.py
vendored
File diff suppressed because it is too large
Load diff
18
nmigen/vendor/xilinx_7series.py
vendored
18
nmigen/vendor/xilinx_7series.py
vendored
|
|
@ -1,15 +1,7 @@
|
|||
from amaranth.vendor.xilinx_7series import *
|
||||
from amaranth.vendor.xilinx_7series import __all__
|
||||
|
||||
|
||||
import warnings
|
||||
|
||||
from .xilinx import XilinxPlatform
|
||||
|
||||
|
||||
__all__ = ["Xilinx7SeriesPlatform"]
|
||||
|
||||
|
||||
Xilinx7SeriesPlatform = XilinxPlatform
|
||||
|
||||
|
||||
# TODO(nmigen-0.4): remove
|
||||
warnings.warn("instead of nmigen.vendor.xilinx_7series.Xilinx7SeriesPlatform, "
|
||||
"use nmigen.vendor.xilinx.XilinxPlatform",
|
||||
warnings.warn("instead of nmigen.vendor.xilinx_7series, use amaranth.vendor.xilinx_7series",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
19
nmigen/vendor/xilinx_spartan_3_6.py
vendored
19
nmigen/vendor/xilinx_spartan_3_6.py
vendored
|
|
@ -1,16 +1,7 @@
|
|||
from amaranth.vendor.xilinx_spartan_3_6 import *
|
||||
from amaranth.vendor.xilinx_spartan_3_6 import __all__
|
||||
|
||||
|
||||
import warnings
|
||||
|
||||
from .xilinx import XilinxPlatform
|
||||
|
||||
|
||||
__all__ = ["XilinxSpartan3APlatform", "XilinxSpartan6Platform"]
|
||||
|
||||
|
||||
XilinxSpartan3APlatform = XilinxPlatform
|
||||
XilinxSpartan6Platform = XilinxPlatform
|
||||
|
||||
|
||||
# TODO(nmigen-0.4): remove
|
||||
warnings.warn("instead of nmigen.vendor.xilinx_spartan_3_6.XilinxSpartan3APlatform and "
|
||||
".XilinxSpartan6Platform, use nmigen.vendor.xilinx.XilinxPlatform",
|
||||
warnings.warn("instead of nmigen.vendor.xilinx_spartan_3_6, use amaranth.vendor.xilinx_spartan_3_6",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
18
nmigen/vendor/xilinx_ultrascale.py
vendored
18
nmigen/vendor/xilinx_ultrascale.py
vendored
|
|
@ -1,15 +1,7 @@
|
|||
from amaranth.vendor.xilinx_ultrascale import *
|
||||
from amaranth.vendor.xilinx_ultrascale import __all__
|
||||
|
||||
|
||||
import warnings
|
||||
|
||||
from .xilinx import XilinxPlatform
|
||||
|
||||
|
||||
__all__ = ["XilinxUltraScalePlatform"]
|
||||
|
||||
|
||||
XilinxUltraScalePlatform = XilinxPlatform
|
||||
|
||||
|
||||
# TODO(nmigen-0.4): remove
|
||||
warnings.warn("instead of nmigen.vendor.xilinx_ultrascale.XilinxUltraScalePlatform, "
|
||||
"use nmigen.vendor.xilinx.XilinxPlatform",
|
||||
warnings.warn("instead of nmigen.vendor.xilinx_ultrascale, use amaranth.vendor.xilinx_ultrascale",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue