hdl.mem: use read_port(domain="comb") for asynchronous read ports.

This avoids the absurdity of the combination of arguments that is
read_port(domain="sync", synchronous=False). Fixes #116.
parent f75a0163f9
commit 94e8f479a5
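For illustration, a minimal sketch of the API before and after this change (the calls mirror the hunks below; the sketch itself is not part of the commit):

    mem = Memory(width=8, depth=4)

    # Before: an asynchronous port was requested with synchronous=False,
    # leaving the meaningless domain="sync" default in place.
    rdport = mem.read_port(synchronous=False)

    # After: the domain argument alone selects the port flavor.
    rdport = mem.read_port(domain="comb")       # asynchronous (combinatorial) read
    rdport = mem.read_port()                    # synchronous, transparent read (default)
    rdport = mem.read_port(transparent=False)   # synchronous, read-before-write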
nmigen/hdl/mem.py
@@ -52,10 +52,10 @@ class Memory:
                 raise TypeError("Memory initialization value at address {:x}: {}"
                                 .format(addr, e)) from None
 
-    def read_port(self, domain="sync", synchronous=True, transparent=True):
-        if not synchronous and not transparent:
+    def read_port(self, domain="sync", transparent=True):
+        if domain == "comb" and not transparent:
             raise ValueError("Read port cannot be simultaneously asynchronous and non-transparent")
-        return ReadPort(self, domain, synchronous, transparent)
+        return ReadPort(self, domain, transparent)
 
     def write_port(self, domain="sync", priority=0, granularity=None):
         if granularity is None:
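The one combination that is still rejected is an asynchronous, non-transparent port; a minimal sketch of the failure mode, matching the test case further down:

    mem = Memory(width=8, depth=4)
    mem.read_port(domain="comb", transparent=False)
    # ValueError: Read port cannot be simultaneously asynchronous and non-transparent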
@@ -77,17 +77,16 @@ class Memory:
 
 
 class ReadPort(Elaboratable):
-    def __init__(self, memory, domain, synchronous, transparent):
+    def __init__(self, memory, domain, transparent):
         self.memory = memory
         self.domain = domain
-        self.synchronous = synchronous
         self.transparent = transparent
 
         self.addr = Signal(max=memory.depth,
                            name="{}_r_addr".format(memory.name))
         self.data = Signal(memory.width,
                            name="{}_r_data".format(memory.name))
-        if synchronous and not transparent:
+        if self.domain != "comb" and not transparent:
             self.en = Signal(name="{}_r_en".format(memory.name))
         else:
             self.en = Const(1)
@@ -97,15 +96,19 @@ class ReadPort(Elaboratable):
             p_MEMID=self.memory,
             p_ABITS=self.addr.nbits,
             p_WIDTH=self.data.nbits,
-            p_CLK_ENABLE=self.synchronous,
+            p_CLK_ENABLE=self.domain != "comb",
             p_CLK_POLARITY=1,
             p_TRANSPARENT=self.transparent,
-            i_CLK=ClockSignal(self.domain) if self.synchronous else Const(0),
+            i_CLK=ClockSignal(self.domain) if self.domain != "comb" else Const(0),
             i_EN=self.en,
             i_ADDR=self.addr,
             o_DATA=self.data,
         )
-        if self.synchronous and not self.transparent:
+        if self.domain == "comb":
+            # Asynchronous port
+            f.add_statements(self.data.eq(self.memory._array[self.addr]))
+            f.add_driver(self.data)
+        elif not self.transparent:
             # Synchronous, read-before-write port
             f.add_statements(
                 Switch(self.en, {
@@ -113,7 +116,7 @@ class ReadPort(Elaboratable):
                 })
             )
             f.add_driver(self.data, self.domain)
-        elif self.synchronous:
+        else:
             # Synchronous, write-through port
             # This model is a bit unconventional. We model transparent ports as asynchronous ports
             # that are latched when the clock is high. This isn't exactly correct, but it is very
@@ -134,10 +137,6 @@ class ReadPort(Elaboratable):
             )
             f.add_driver(latch_addr, self.domain)
             f.add_driver(self.data)
-        else:
-            # Asynchronous port
-            f.add_statements(self.data.eq(self.memory._array[self.addr]))
-            f.add_driver(self.data)
         return f
 
 
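Taken together, the hunks above leave ReadPort.elaborate() dispatching on domain and transparent alone; a condensed sketch of the resulting control flow (the comments paraphrase those in the source):

    if self.domain == "comb":
        # Asynchronous port: data combinatorially mirrors memory[addr].
        ...
    elif not self.transparent:
        # Synchronous, read-before-write port: data updates on the clock
        # edge, gated by en.
        ...
    else:
        # Synchronous, write-through (transparent) port: modeled as an
        # asynchronous port whose output is latched while the clock is high.
        ...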
nmigen/lib/fifo.py
@@ -151,7 +151,7 @@ class SyncFIFO(Elaboratable, FIFOInterface):
         storage = Memory(self.width, self.depth)
         wrport = m.submodules.wrport = storage.write_port()
         rdport = m.submodules.rdport = storage.read_port(
-            synchronous=not self.fwft, transparent=self.fwft)
+            domain="comb" if self.fwft else "sync", transparent=self.fwft)
         produce = Signal(max=self.depth)
         consume = Signal(max=self.depth)
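The SyncFIFO change follows directly: a first-word-fall-through FIFO needs its read data available combinatorially, so the fwft flag now selects the clock domain rather than a separate synchronous argument. In sketch form:

    # fwft=True  -> storage.read_port(domain="comb", transparent=True)
    # fwft=False -> storage.read_port(domain="sync", transparent=False)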
nmigen/test/test_hdl_mem.py
@@ -45,7 +45,6 @@ class MemoryTestCase(FHDLTestCase):
         rdport = mem.read_port()
         self.assertEqual(rdport.memory, mem)
         self.assertEqual(rdport.domain, "sync")
-        self.assertEqual(rdport.synchronous, True)
         self.assertEqual(rdport.transparent, True)
         self.assertEqual(len(rdport.addr), 2)
         self.assertEqual(len(rdport.data), 8)
@@ -58,17 +57,15 @@ class MemoryTestCase(FHDLTestCase):
         rdport = mem.read_port(transparent=False)
         self.assertEqual(rdport.memory, mem)
         self.assertEqual(rdport.domain, "sync")
-        self.assertEqual(rdport.synchronous, True)
         self.assertEqual(rdport.transparent, False)
         self.assertEqual(len(rdport.en), 1)
         self.assertIsInstance(rdport.en, Signal)
 
     def test_read_port_asynchronous(self):
         mem = Memory(width=8, depth=4)
-        rdport = mem.read_port(synchronous=False)
+        rdport = mem.read_port(domain="comb")
         self.assertEqual(rdport.memory, mem)
-        self.assertEqual(rdport.domain, "sync")
-        self.assertEqual(rdport.synchronous, False)
+        self.assertEqual(rdport.domain, "comb")
         self.assertEqual(rdport.transparent, True)
         self.assertEqual(len(rdport.en), 1)
         self.assertIsInstance(rdport.en, Const)
@@ -78,7 +75,7 @@ class MemoryTestCase(FHDLTestCase):
         mem = Memory(width=8, depth=4)
         with self.assertRaises(ValueError,
                 msg="Read port cannot be simultaneously asynchronous and non-transparent"):
-            mem.read_port(synchronous=False, transparent=False)
+            mem.read_port(domain="comb", transparent=False)
 
     def test_write_port(self):
         mem = Memory(width=8, depth=4)
nmigen/test/test_lib_fifo.py
@@ -59,7 +59,7 @@ class FIFOModel(Elaboratable, FIFOInterface):
 
         storage = Memory(self.width, self.depth)
         wrport = m.submodules.wrport = storage.write_port(domain=self.wdomain)
-        rdport = m.submodules.rdport = storage.read_port (synchronous=False)
+        rdport = m.submodules.rdport = storage.read_port (domain="comb")
 
         produce = Signal(max=self.depth)
         consume = Signal(max=self.depth)
nmigen/test/test_sim.py
@@ -431,7 +431,8 @@ class SimulatorIntegrationTestCase(FHDLTestCase):
         self.m = Module()
         self.memory = Memory(width=8, depth=4, init=[0xaa, 0x55])
         self.m.submodules.rdport = self.rdport = \
-            self.memory.read_port(synchronous=rd_synchronous, transparent=rd_transparent)
+            self.memory.read_port(domain="sync" if rd_synchronous else "comb",
+                                  transparent=rd_transparent)
         self.m.submodules.wrport = self.wrport = \
             self.memory.write_port(granularity=wr_granularity)
 