 from hls4ml.converters.keras_v3.einsum_dense import KV3EinsumDenseHandler
 
 if TYPE_CHECKING:
-    import squark
+    import hgq
 
     from keras.api import KerasTensor, Layer
 
 
 def extract_fixed_quantizer_config(q, tensor: 'KerasTensor', is_input: bool) -> dict[str, Any]:
+    from hgq.quantizer.internal.fixed_point_quantizer import FixedPointQuantizerKBI, FixedPointQuantizerKIF
     from keras.api.ops import convert_to_numpy
-    from squark.quantizer.internal.fixed_point_quantizer import FixedPointQuantizerKBI, FixedPointQuantizerKIF
 
     internal_q: FixedPointQuantizerKIF | FixedPointQuantizerKBI = q.quantizer
 
     shape: tuple[int, ...] = tensor.shape[1:]  # type: ignore
     if any([s is None for s in shape]):
         raise ValueError(f"Tensor {tensor.name} has at least one dimension with no fixed size")
     k, i, f = internal_q.kif
-    k, B, I = k, k + i + f, k + i  # type: ignore
-    k, B, I = convert_to_numpy(k), convert_to_numpy(B), convert_to_numpy(I)
+    k, B, I = k, k + i + f, k + i  # type: ignore # noqa: E741
+    k, B, I = convert_to_numpy(k), convert_to_numpy(B), convert_to_numpy(I)  # noqa: E741
 
     k = np.broadcast_to(k.astype(np.int8), (1,) + shape)
     B = np.broadcast_to(B.astype(np.int8), (1,) + shape)
-    I = np.broadcast_to(I.astype(np.int8), (1,) + shape)
+    I = np.broadcast_to(I.astype(np.int8), (1,) + shape)  # noqa: E741
 
     overflow_mode = internal_q.overflow_mode
     round_mode = internal_q.round_mode
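[Note, not part of the diff: a minimal sketch of the bit-width arithmetic in the hunk above, assuming scalar kif values. In the KIF parametrization, k is the sign bit (1 if signed), i the integer bits above the binary point excluding the sign, and f the fractional bits; the code maps these to a total width B = k + i + f and an integer width I = k + i, which matches hls4ml's ap_fixed<B, I> convention. The concrete values below are made up for illustration.]

    import numpy as np

    # Hypothetical KIF values: signed, 3 integer bits, 4 fractional bits.
    k, i, f = 1, 3, 4
    B, I = k + i + f, k + i  # noqa: E741 -- total width and integer width
    k, B, I = np.int8(k), np.int8(B), np.int8(I)  # noqa: E741
    print(B, I)  # 8 4 -> ap_fixed<8, 4>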
@@ -61,7 +61,7 @@ def override_io_tensor_confs(confs: tuple[dict[str, Any], ...], overrides: dict[
 class SQLayerHandler(KerasV3LayerHandler):
     def __call__(
         self,
-        layer: 'squark.layers.QLayerBase',
+        layer: 'hgq.layers.QLayerBase',
         in_tensors: Sequence['KerasTensor'],
         out_tensors: Sequence['KerasTensor'],
     ):
@@ -108,18 +108,18 @@ def load_weight(self, layer: 'Layer', key: str):
 @register
 class SQEinsumDenseHandler(SQLayerHandler, KV3EinsumDenseHandler):
     handles = (
-        'squark.layers.core.einsum_dense.QEinsumDense',
-        'squark.layers.einsum_dense_batchnorm.QEinsumDenseBatchnorm',
+        'hgq.layers.core.einsum_dense.QEinsumDense',
+        'hgq.layers.einsum_dense_batchnorm.QEinsumDenseBatchnorm',
     )
 
 
 @register
 class SQStandaloneQuantizerHandler(KerasV3LayerHandler):
-    handles = ('squark.quantizer.quantizer.Quantizer',)
+    handles = ('hgq.quantizer.quantizer.Quantizer',)
 
     def handle(
         self,
-        layer: 'squark.quantizer.Quantizer',
+        layer: 'hgq.quantizer.Quantizer',
         in_tensors: Sequence['KerasTensor'],
         out_tensors: Sequence['KerasTensor'],
     ):
@@ -131,19 +131,19 @@ def handle(
 @register
 class SQConvHandler(SQLayerHandler, KV3ConvHandler):
     handles = (
-        'squark.layers.conv.QConv1D',
-        'squark.layers.conv.QConv2D',
-        # 'squark.layers.conv.QConv3D',
+        'hgq.layers.conv.QConv1D',
+        'hgq.layers.conv.QConv2D',
+        # 'hgq.layers.conv.QConv3D',
     )
 
 
 @register
 class SQDenseHandler(SQLayerHandler, KV3DenseHandler):
-    handles = ('squark.layers.core.dense.QDense', 'squark.layers.core.dense.QBatchNormDense')
+    handles = ('hgq.layers.core.dense.QDense', 'hgq.layers.core.dense.QBatchNormDense')
 
     def handle(
         self,
-        layer: 'squark.layers.QDense',
+        layer: 'hgq.layers.QDense',
         in_tensors: Sequence['KerasTensor'],
         out_tensors: Sequence['KerasTensor'],
     ):
@@ -161,16 +161,16 @@ def handle(
 
 @register
 class SQActivationHandler(SQLayerHandler, KV3ActivationHandler):
-    handles = ('squark.layers.activation.QActivation',)
+    handles = ('hgq.layers.activation.QActivation',)
 
 
 @register
 class SQBatchNormalizationHandler(SQLayerHandler):
-    handles = ('squark.layers.batch_normalization.QBatchNormalization',)
+    handles = ('hgq.layers.batch_normalization.QBatchNormalization',)
 
     def handle(
         self,
-        layer: 'squark.layers.QBatchNormalization',
+        layer: 'hgq.layers.QBatchNormalization',
         in_tensors: Sequence['KerasTensor'],
         out_tensors: Sequence['KerasTensor'],
     ):
@@ -193,18 +193,18 @@ def handle(
 @register
 class SQMergeHandler(SQLayerHandler, KV3MergeHandler):
     handles = (
-        'squark.layers.ops.merge.QAdd',
-        'squark.layers.ops.merge.QSubtract',
-        'squark.layers.ops.merge.QMultiply',
-        'squark.layers.ops.merge.QAverage',
-        'squark.layers.ops.merge.QMaximum',
-        'squark.layers.ops.merge.QMinimum',
-        'squark.layers.ops.merge.QConcatenate',
+        'hgq.layers.ops.merge.QAdd',
+        'hgq.layers.ops.merge.QSubtract',
+        'hgq.layers.ops.merge.QMultiply',
+        'hgq.layers.ops.merge.QAverage',
+        'hgq.layers.ops.merge.QMaximum',
+        'hgq.layers.ops.merge.QMinimum',
+        'hgq.layers.ops.merge.QConcatenate',
     )
 
     def handle(
         self,
-        layer: 'squark.layers.ops.merge.QMerge',
+        layer: 'hgq.layers.ops.merge.QMerge',
         in_tensors: Sequence['KerasTensor'],
         out_tensors: Sequence['KerasTensor'],
     ):
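[Note, not part of the diff: each handles entry is the fully qualified class path of an HGQ layer, presumably used by the keras-v3 converter as a dispatch key, which is why every string has to move from the squark.* to the hgq.* namespace along with the package rename. Below is a minimal sketch of how such a key could be derived from a layer instance; the re-exported QDense import and its constructor argument are assumptions for illustration, not taken from this diff.]

    from hgq.layers import QDense  # assumed re-export, consistent with the 'hgq.layers.QDense' type hints above

    layer = QDense(16)  # hypothetical constructor call
    key = f'{type(layer).__module__}.{type(layer).__name__}'
    print(key)  # expected to match an entry in SQDenseHandler.handles, e.g. 'hgq.layers.core.dense.QDense'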