1 parent 72a8d64 · commit d59d246
hls4ml/model/optimizer/passes/bit_exact.py
@@ -574,6 +574,13 @@ def match(self, node: Layer):
 
     def transform(self, model, node: Layer):
         out_layers: list[FixedPointQuantizer] = get_output_layers(node)
+
+        if len(out_layers) == 0:  # Input connected to nothing
+            new_type = to_hls4ml_fixed(0, 0, 1, f'{node.name}_t')
+            node.get_output_variable().type = new_type
+            node.model.config.layer_name_precision[node.name] = str(new_type)
+            return False
+
         if not all(isinstance(l, FixedPointQuantizer) for l in out_layers):
             warn(f'Input {node.name} has unhandled high precision. Consider setting it manually before synthesising.')
             return False
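The added branch covers an Input layer whose output feeds no FixedPointQuantizer at all: rather than leaving its precision undetermined, the pass assigns a placeholder minimal-width fixed type, records it under the layer's name in the model config, and returns False, mirroring the existing fallback branch below it. The standalone sketch below illustrates that logic only; `FixedType`, `DummyNode`, and the bare `precision_config` dict are simplified stand-ins invented for illustration, and the 1-bit signed type is an assumption rather than a claim about what `to_hls4ml_fixed(0, 0, 1, ...)` actually constructs.

```python
# Sketch of the dangling-input handling added in this commit.
# FixedType and DummyNode are toy stand-ins, not hls4ml's real classes.
from dataclasses import dataclass, field


@dataclass
class FixedType:
    """Toy fixed-point type: total bit width, integer bits, signedness, and a name."""
    signed: bool
    width: int
    integer: int
    name: str

    def __str__(self) -> str:
        kind = 'fixed' if self.signed else 'ufixed'
        return f'{kind}<{self.width},{self.integer}>'  # e.g. fixed<1,0>


@dataclass
class DummyNode:
    name: str
    consumers: list = field(default_factory=list)  # downstream layers, empty if dangling
    out_type: FixedType | None = None


def assign_placeholder_type(node: DummyNode, precision_config: dict) -> bool:
    """If `node` feeds nothing, give it a trivial 1-bit type and record it in the config."""
    if not node.consumers:  # input connected to nothing
        new_type = FixedType(signed=True, width=1, integer=0, name=f'{node.name}_t')
        node.out_type = new_type
        precision_config[node.name] = str(new_type)
        return False
    return True


# Usage: a dangling input gets the placeholder type and a config entry.
precision_config: dict[str, str] = {}
dangling = DummyNode(name='input_1')
assign_placeholder_type(dangling, precision_config)
print(dangling.out_type.name, precision_config)  # input_1_t {'input_1': 'fixed<1,0>'}
```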