
Commit ed624c0

hgq2 rename
1 parent e669e5b commit ed624c0

9 files changed, +46 −46 lines changed


.pre-commit-config.yaml

+1 −1

@@ -48,7 +48,7 @@ repos:
         additional_dependencies: [flake8-bugbear, flake8-print]
         args: ['--max-line-length=125', # github viewer width
                '--extend-ignore=E203,T201', # E203 is not PEP8 compliant
-               '--per-file-ignores=hls4ml/model/optimizer/passes/bit_exact.py:E741,hls4ml/converters/keras_v3/squark/_base.py:E741,__init__.py:F401',
+               '--per-file-ignores=hls4ml/model/optimizer/passes/bit_exact.py:E741,__init__.py:F401',
                # i for #int w/o sign, I for #int w/ sign when massively processing bw conversions ......
                # ignore unused imports in __init__.py .....
               ]
hls4ml/converters/keras_v3/__init__.py

+1 −1

@@ -1,7 +1,7 @@
 from . import conv  # noqa: F401
 from . import core  # noqa: F401
 from . import einsum_dense  # noqa: F401
-from . import squark  # noqa: F401
+from . import hgq2  # noqa: F401
 from ._base import registry as layer_handlers

 __all__ = ['layer_handlers']
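
For orientation (illustration only, not part of this commit): importing the renamed hgq2 submodule is what registers its handlers, since each @register-decorated class in the diffs below adds itself to the registry that this file re-exports as layer_handlers. A minimal sketch, assuming the registry can be queried for the fully-qualified 'hgq.*' layer paths declared in each handler's handles tuple:

# Assumption: layer_handlers behaves like a mapping keyed by the paths in 'handles'.
from hls4ml.converters.keras_v3 import layer_handlers

print('hgq.layers.core.dense.QDense' in layer_handlers)  # expected True after this commit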

hls4ml/converters/keras_v3/squark/_base.py renamed to hls4ml/converters/keras_v3/hgq2/_base.py

+26 −26

@@ -9,26 +9,26 @@
 from hls4ml.converters.keras_v3.einsum_dense import KV3EinsumDenseHandler

 if TYPE_CHECKING:
-    import squark
+    import hgq
     from keras.api import KerasTensor, Layer


 def extract_fixed_quantizer_config(q, tensor: 'KerasTensor', is_input: bool) -> dict[str, Any]:
+    from hgq.quantizer.internal.fixed_point_quantizer import FixedPointQuantizerKBI, FixedPointQuantizerKIF
     from keras.api.ops import convert_to_numpy
-    from squark.quantizer.internal.fixed_point_quantizer import FixedPointQuantizerKBI, FixedPointQuantizerKIF

     internal_q: FixedPointQuantizerKIF | FixedPointQuantizerKBI = q.quantizer

     shape: tuple[int, ...] = tensor.shape[1:]  # type: ignore
     if any([s is None for s in shape]):
         raise ValueError(f"Tensor {tensor.name} has at least one dimension with no fixed size")
     k, i, f = internal_q.kif
-    k, B, I = k, k + i + f, k + i  # type: ignore
-    k, B, I = convert_to_numpy(k), convert_to_numpy(B), convert_to_numpy(I)
+    k, B, I = k, k + i + f, k + i  # type: ignore # noqa: E741
+    k, B, I = convert_to_numpy(k), convert_to_numpy(B), convert_to_numpy(I)  # noqa: E741

     k = np.broadcast_to(k.astype(np.int8), (1,) + shape)
     B = np.broadcast_to(B.astype(np.int8), (1,) + shape)
-    I = np.broadcast_to(I.astype(np.int8), (1,) + shape)
+    I = np.broadcast_to(I.astype(np.int8), (1,) + shape)  # noqa: E741

     overflow_mode = internal_q.overflow_mode
     round_mode = internal_q.round_mode
@@ -61,7 +61,7 @@ def override_io_tensor_confs(confs: tuple[dict[str, Any], ...], overrides: dict[
 class SQLayerHandler(KerasV3LayerHandler):
     def __call__(
         self,
-        layer: 'squark.layers.QLayerBase',
+        layer: 'hgq.layers.QLayerBase',
         in_tensors: Sequence['KerasTensor'],
         out_tensors: Sequence['KerasTensor'],
     ):
@@ -108,18 +108,18 @@ def load_weight(self, layer: 'Layer', key: str):
 @register
 class SQEinsumDenseHandler(SQLayerHandler, KV3EinsumDenseHandler):
     handles = (
-        'squark.layers.core.einsum_dense.QEinsumDense',
-        'squark.layers.einsum_dense_batchnorm.QEinsumDenseBatchnorm',
+        'hgq.layers.core.einsum_dense.QEinsumDense',
+        'hgq.layers.einsum_dense_batchnorm.QEinsumDenseBatchnorm',
     )


 @register
 class SQStandaloneQuantizerHandler(KerasV3LayerHandler):
-    handles = ('squark.quantizer.quantizer.Quantizer',)
+    handles = ('hgq.quantizer.quantizer.Quantizer',)

     def handle(
         self,
-        layer: 'squark.quantizer.Quantizer',
+        layer: 'hgq.quantizer.Quantizer',
         in_tensors: Sequence['KerasTensor'],
         out_tensors: Sequence['KerasTensor'],
     ):
@@ -131,19 +131,19 @@ def handle(
 @register
 class SQConvHandler(SQLayerHandler, KV3ConvHandler):
     handles = (
-        'squark.layers.conv.QConv1D',
-        'squark.layers.conv.QConv2D',
-        # 'squark.layers.conv.QConv3D',
+        'hgq.layers.conv.QConv1D',
+        'hgq.layers.conv.QConv2D',
+        # 'hgq.layers.conv.QConv3D',
     )


 @register
 class SQDenseHandler(SQLayerHandler, KV3DenseHandler):
-    handles = ('squark.layers.core.dense.QDense', 'squark.layers.core.dense.QBatchNormDense')
+    handles = ('hgq.layers.core.dense.QDense', 'hgq.layers.core.dense.QBatchNormDense')

     def handle(
         self,
-        layer: 'squark.layers.QDense',
+        layer: 'hgq.layers.QDense',
         in_tensors: Sequence['KerasTensor'],
         out_tensors: Sequence['KerasTensor'],
     ):
@@ -161,16 +161,16 @@ def handle(

 @register
 class SQActivationHandler(SQLayerHandler, KV3ActivationHandler):
-    handles = ('squark.layers.activation.QActivation',)
+    handles = ('hgq.layers.activation.QActivation',)


 @register
 class SQBatchNormalizationHandler(SQLayerHandler):
-    handles = ('squark.layers.batch_normalization.QBatchNormalization',)
+    handles = ('hgq.layers.batch_normalization.QBatchNormalization',)

     def handle(
         self,
-        layer: 'squark.layers.QBatchNormalization',
+        layer: 'hgq.layers.QBatchNormalization',
         in_tensors: Sequence['KerasTensor'],
         out_tensors: Sequence['KerasTensor'],
     ):
@@ -193,18 +193,18 @@ def handle(
 @register
 class SQMergeHandler(SQLayerHandler, KV3MergeHandler):
     handles = (
-        'squark.layers.ops.merge.QAdd',
-        'squark.layers.ops.merge.QSubtract',
-        'squark.layers.ops.merge.QMultiply',
-        'squark.layers.ops.merge.QAverage',
-        'squark.layers.ops.merge.QMaximum',
-        'squark.layers.ops.merge.QMinimum',
-        'squark.layers.ops.merge.QConcatenate',
+        'hgq.layers.ops.merge.QAdd',
+        'hgq.layers.ops.merge.QSubtract',
+        'hgq.layers.ops.merge.QMultiply',
+        'hgq.layers.ops.merge.QAverage',
+        'hgq.layers.ops.merge.QMaximum',
+        'hgq.layers.ops.merge.QMinimum',
+        'hgq.layers.ops.merge.QConcatenate',
     )

     def handle(
         self,
-        layer: 'squark.layers.ops.merge.QMerge',
+        layer: 'hgq.layers.ops.merge.QMerge',
         in_tensors: Sequence['KerasTensor'],
         out_tensors: Sequence['KerasTensor'],
     ):
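
A note on the pattern above (illustration only, not part of the commit): each handler matches layers by the fully-qualified class paths in its handles tuple, which is why the rename only touches those strings, the imports, and the type hints. A minimal sketch of the same registration pattern, where QExampleLayer is a hypothetical layer path used purely for illustration:

# Illustration of the handler pattern shown in this file; not part of the commit.
from typing import Any, Sequence

from ._base import SQLayerHandler, register


@register
class SQExampleHandler(SQLayerHandler):
    # Handlers now declare 'hgq.*' paths instead of 'squark.*'.
    handles = ('hgq.layers.example.QExampleLayer',)  # hypothetical path

    def handle(self, layer, in_tensors: Sequence[Any], out_tensors: Sequence[Any]) -> dict[str, Any]:
        # Real handlers build the hls4ml layer config from the layer's quantizer settings.
        return {'name': layer.name, 'class_name': 'ExampleLayer'}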

hls4ml/converters/keras_v3/squark/einsum.py renamed to hls4ml/converters/keras_v3/hgq2/einsum.py

+3 −3

@@ -5,17 +5,17 @@
 from ._base import SQLayerHandler, register

 if typing.TYPE_CHECKING:
-    import squark
+    import hgq
     from keras.api import KerasTensor


 @register
 class SQEinsumHandler(SQLayerHandler):
-    handles = ('squark.layers.ops.einsum.QEinsum',)
+    handles = ('hgq.layers.ops.einsum.QEinsum',)

     def handle(
         self,
-        layer: 'squark.layers.QEinsum',
+        layer: 'hgq.layers.QEinsum',
         in_tensors: Sequence['KerasTensor'],
         out_tensors: Sequence['KerasTensor'],
     ):

hls4ml/converters/keras_v3/squark/multi_head_attention.py renamed to hls4ml/converters/keras_v3/hgq2/multi_head_attention.py

+4 −4

@@ -9,22 +9,22 @@
 from .softmax import SQSoftmaxHandler

 if typing.TYPE_CHECKING:
-    import squark
+    import hgq
     from keras.api import KerasTensor


 @register
 class SQMultiHeadAttentionHandler(SQLayerHandler):
-    handles = ('squark.layers.multi_head_attention.QMultiHeadAttention',)
+    handles = ('hgq.layers.multi_head_attention.QMultiHeadAttention',)

     def handle(
         self,
-        layer: 'squark.layers.QMultiHeadAttention',
+        layer: 'hgq.layers.QMultiHeadAttention',
         in_tensors: Sequence['KerasTensor'],
         out_tensors: Sequence['KerasTensor'],
     ):
+        from hgq.layers import QEinsum
         from keras import KerasTensor
-        from squark.layers import QEinsum

         assert len(in_tensors) in (3, 4), 'MultiHead layer must have 3 (Q, K, V) or 4 (Q, K, V, M) input tensors'
         assert len(out_tensors) == 1, 'Attention score output is not supported yet'

hls4ml/converters/keras_v3/squark/softmax.py renamed to hls4ml/converters/keras_v3/hgq2/softmax.py

+5 −5

@@ -7,9 +7,9 @@
 from ._base import SQLayerHandler, register

 if typing.TYPE_CHECKING:
-    import squark
+    import hgq
+    from hgq.quantizer.internal import FixedPointQuantizerBase
     from keras.api import KerasTensor
-    from squark.quantizer.internal import FixedPointQuantizerBase


 def fixed_quantizer_to_hls4ml_t(q: 'FixedPointQuantizerBase', take_max=False):
@@ -40,11 +40,11 @@ def fixed_quantizer_to_hls4ml_t(q: 'FixedPointQuantizerBase', take_max=False):

 @register
 class SQSoftmaxHandler(SQLayerHandler):
-    handles = ('squark.layers.softmax.QSoftmax',)
+    handles = ('hgq.layers.softmax.QSoftmax',)

     def handle(
         self,
-        layer: 'squark.layers.QSoftmax',
+        layer: 'hgq.layers.QSoftmax',
         in_tensors: Sequence['KerasTensor'],
         out_tensors: Sequence['KerasTensor'],
     ):
@@ -66,8 +66,8 @@ def handle(
         n_in: int = prod(in_tensors[0].shape[axs[0] : axs[-1] + 1])  # type: ignore
         ax = -1  # if n_inner == 1 else 999  # 999 as placeholder

+        from hgq.quantizer.internal import FixedPointQuantizerBase
         from keras import ops
-        from squark.quantizer.internal import FixedPointQuantizerBase

         impl = 'stable' if layer.stable else 'latency'

hls4ml/converters/keras_v3/squark/unary_lut.py renamed to hls4ml/converters/keras_v3/hgq2/unary_lut.py

+4 −4

@@ -9,7 +9,7 @@
 from ._base import KerasV3LayerHandler, SQLayerHandler, register

 if typing.TYPE_CHECKING:
-    import squark
+    import hgq
     from keras.api import KerasTensor

 from decimal import Decimal
@@ -19,16 +19,16 @@

 @register
 class SQUnaryLUTHandler(SQLayerHandler, KerasV3LayerHandler):
-    handles = ('squark.layers.activation.QUnaryFunctionLUT',)
+    handles = ('hgq.layers.activation.QUnaryFunctionLUT',)

     def handle(
         self,
-        layer: 'squark.layers.QUnaryFunctionLUT',
+        layer: 'hgq.layers.QUnaryFunctionLUT',
         in_tensors: Sequence['KerasTensor'],
         out_tensors: Sequence['KerasTensor'],
     ):
+        from hgq.quantizer.internal import FixedPointQuantizerBase, FloatPointQuantizer
         from keras import ops
-        from squark.quantizer.internal import FixedPointQuantizerBase, FloatPointQuantizer

         if not layer.enable_iq and not layer.enable_oq:
             raise ValueError('Currently only support input_quantizer enabled UnaryFunctionLUT layer')

test/pytest/test_qeinsum.py

+2 −2

@@ -11,8 +11,8 @@
     pytest.skip('Only keras v3 is supported for now', allow_module_level=True)

 try:
-    from squark.layers import QEinsum
-    from squark.utils import trace_mode
+    from hgq.layers import QEinsum
+    from hgq.utils import trace_mode
 except ImportError:
     pytest.skip('s-quark is not installed', allow_module_level=True)
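
End to end, the rename only changes which package a user imports when building the quantized model; hls4ml's conversion entry point stays the same. A rough sketch, where the QDense arguments are an assumption (mirroring keras.layers.Dense) rather than something taken from this diff:

# Sketch only: post-rename imports plus the usual hls4ml conversion call.
import keras
from hgq.layers import QDense  # was: from squark.layers import QDense

import hls4ml

inp = keras.Input(shape=(16,))
out = QDense(8)(inp)  # assumed QDense signature, mirroring keras.layers.Dense
model = keras.Model(inp, out)

# The keras_v3/hgq2 handlers from this commit translate the 'hgq.*' layers here.
hls_model = hls4ml.converters.convert_from_keras_model(model, output_dir='prj', backend='Vitis')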
