Skip to content

Commit f28fcee

Browse files
yiliu30 and chensuyue authored
Remove 1x API (#1865)
Signed-off-by: yiliu30 <[email protected]> Co-authored-by: chen, suyue <[email protected]>
1 parent 1386ac5 commit f28fcee

File tree

225 files changed

+73
-62056
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

225 files changed

+73
-62056
lines changed

.azure-pipelines/model-test.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ parameters:
5757
- name: PyTorchModelList
5858
type: object
5959
default:
60-
- resnet18
60+
# - resnet18
6161
- resnet18_fx
6262
- name: ONNXModelList
6363
type: object

.github/checkgroup.yml

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,6 @@ subprojects:
5151
- "Model-Test"
5252
- "Model-Test (Generate Report GenerateReport)"
5353
- "Model-Test (Run ONNX Model resnet50-v1-12)"
54-
- "Model-Test (Run PyTorch Model resnet18)"
5554
- "Model-Test (Run PyTorch Model resnet18_fx)"
5655
- "Model-Test (Run TensorFlow Model resnet50v1.5)"
5756
- "Model-Test (Run TensorFlow Model ssd_resnet50_v1)"

neural_compressor/adaptor/keras.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,6 @@
2424
import numpy as np
2525
import yaml
2626

27-
from ..conf.dotdict import deep_get
2827
from ..data.dataloaders.base_dataloader import BaseDataLoader
2928
from ..utils import logger
3029
from ..utils.utility import (
@@ -34,6 +33,7 @@
3433
Dequantize,
3534
LazyImport,
3635
Statistics,
36+
deep_get,
3737
dump_elapsed_time,
3838
singleton,
3939
version1_lt_version2,

neural_compressor/adaptor/mxnet.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -446,13 +446,6 @@ def _one_shot_query(self):
446446
raise ValueError(
447447
"Please check if the format of {} follows Neural Compressor yaml schema.".format(self.cfg)
448448
)
449-
self._update_cfg_with_usr_definition()
450-
451-
def _update_cfg_with_usr_definition(self):
452-
from neural_compressor.conf.pythonic_config import mxnet_config
453-
454-
if mxnet_config.precisions is not None:
455-
self.cur_config["precisions"]["names"] = ",".join(mxnet_config.precisions)
456449

457450
def _get_specified_version_cfg(self, data):
458451
"""Get the configuration for the current runtime.

neural_compressor/adaptor/onnxrt.py

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -2242,15 +2242,6 @@ def _one_shot_query(self):
22422242
raise ValueError(
22432243
"Please check if the format of {} follows Neural Compressor yaml schema.".format(self.cfg)
22442244
)
2245-
self._update_cfg_with_usr_definition()
2246-
2247-
def _update_cfg_with_usr_definition(self):
2248-
from neural_compressor.conf.pythonic_config import onnxruntime_config
2249-
2250-
if onnxruntime_config.graph_optimization_level is not None:
2251-
self.cur_config["graph_optimization"]["level"] = onnxruntime_config.graph_optimization_level
2252-
if onnxruntime_config.precisions is not None:
2253-
self.cur_config["precisions"]["names"] = ",".join(onnxruntime_config.precisions)
22542245

22552246
def _get_specified_version_cfg(self, data): # pragma: no cover
22562247
"""Get the configuration for the current runtime.

neural_compressor/adaptor/pytorch.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -5102,13 +5102,6 @@ def _one_shot_query(self):
51025102
self.cur_config = self.cur_config[self.device]
51035103
elif "cpu" in self.cur_config:
51045104
self.cur_config = self.cur_config["cpu"]
5105-
self._update_cfg_with_usr_definition()
5106-
5107-
def _update_cfg_with_usr_definition(self):
5108-
from neural_compressor.conf.pythonic_config import pytorch_config
5109-
5110-
if pytorch_config.precisions is not None:
5111-
self.cur_config["precisions"]["names"] = ",".join(pytorch_config.precisions)
51125105

51135106
def get_quantization_capability(self, datatype="int8"):
51145107
"""Get the supported op types' quantization capability.

neural_compressor/adaptor/tensorflow.py

Lines changed: 1 addition & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,6 @@
2424
import numpy as np
2525
import yaml
2626

27-
from ..conf.dotdict import deep_get
2827
from ..data.dataloaders.base_dataloader import BaseDataLoader
2928
from ..utils import logger
3029
from ..utils.utility import (
@@ -34,6 +33,7 @@
3433
Dequantize,
3534
LazyImport,
3635
Statistics,
36+
deep_get,
3737
dump_elapsed_time,
3838
singleton,
3939
version1_eq_version2,
@@ -2204,14 +2204,6 @@ def _one_shot_query(self):
22042204
raise ValueError(
22052205
"Please check if the format of {} follows Neural Compressor yaml schema.".format(self.cfg)
22062206
)
2207-
self._update_cfg_with_usr_definition()
2208-
2209-
def _update_cfg_with_usr_definition(self):
2210-
"""Add user defined precision configuration."""
2211-
from neural_compressor.conf.pythonic_config import tensorflow_config
2212-
2213-
if tensorflow_config.precisions is not None:
2214-
self.cur_config["precisions"]["names"] = ",".join(tensorflow_config.precisions)
22152207

22162208
def get_version(self):
22172209
"""Get the current backend version information.

neural_compressor/adaptor/tf_utils/graph_converter.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,13 +28,13 @@
2828
from tensorflow.python.platform import gfile
2929

3030
from neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node import InsertPrintMinMaxNode
31-
from neural_compressor.conf.dotdict import deep_get
3231
from neural_compressor.model import Model
3332
from neural_compressor.model.tensorflow_model import TensorflowSavedModelModel
3433
from neural_compressor.utils.utility import (
3534
CaptureOutputToFile,
3635
CpuInfo,
3736
combine_histogram,
37+
deep_get,
3838
get_all_fp32_data,
3939
get_tensor_histogram,
4040
)

neural_compressor/adaptor/tf_utils/graph_converter_without_calib.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,8 @@
2424
import tensorflow as tf
2525
from tensorflow.python.platform import gfile
2626

27-
from neural_compressor.conf.dotdict import deep_get
2827
from neural_compressor.model import Model
28+
from neural_compressor.utils.utility import deep_get
2929

3030
from .graph_rewriter.bf16.bf16_convert import BF16Convert
3131
from .graph_rewriter.generic.fold_batch_norm import FoldBatchNormNodesOptimizer

neural_compressor/compression/pruner/utils.py

Lines changed: 11 additions & 118 deletions
Original file line numberDiff line numberDiff line change
@@ -18,90 +18,20 @@
1818
# limitations under the License.
1919

2020
import re
21-
from collections import UserDict, defaultdict
21+
from collections import UserDict
2222

2323
import numpy as np
24-
import yaml
2524

26-
from ...config import WeightPruningConfig as WeightPruningConf
27-
28-
try:
29-
from neural_compressor.conf.config import Pruner
30-
from neural_compressor.conf.dotdict import DotDict
31-
from neural_compressor.utils import logger
32-
33-
from ...conf.config import PrunerV2
34-
from ...conf.pythonic_config import WeightPruningConfig
35-
from ...utils.utility import LazyImport
36-
37-
torch = LazyImport("torch")
38-
nn = LazyImport("torch.nn")
39-
F = LazyImport("torch.nn.functional")
40-
tf = LazyImport("tensorflow")
41-
except:
42-
import logging
43-
44-
import tensorflow as tf
45-
import torch
46-
import torch.nn as nn
47-
import torch.nn.functional as F
48-
49-
from .dot_dict import DotDict # #TODO
50-
51-
logger = logging.getLogger(__name__)
52-
from .schema_check import PrunerV2
53-
54-
class WeightPruningConfig:
55-
"""Similar to torch optimizer's interface."""
56-
57-
def __init__(
58-
self,
59-
pruning_configs=[{}], ##empty dict will use global values
60-
target_sparsity=0.9,
61-
pruning_type="snip_momentum",
62-
pattern="4x1",
63-
op_names=[],
64-
excluded_op_names=[],
65-
start_step=0,
66-
end_step=0,
67-
pruning_scope="global",
68-
pruning_frequency=1,
69-
min_sparsity_ratio_per_op=0.0,
70-
max_sparsity_ratio_per_op=0.98,
71-
sparsity_decay_type="exp",
72-
pruning_op_types=["Conv", "Linear"],
73-
**kwargs,
74-
):
75-
"""Init a WeightPruningConfig object."""
76-
self.pruning_configs = pruning_configs
77-
self._weight_compression = DotDict(
78-
{
79-
"target_sparsity": target_sparsity,
80-
"pruning_type": pruning_type,
81-
"pattern": pattern,
82-
"op_names": op_names,
83-
"excluded_op_names": excluded_op_names, ##global only
84-
"start_step": start_step,
85-
"end_step": end_step,
86-
"pruning_scope": pruning_scope,
87-
"pruning_frequency": pruning_frequency,
88-
"min_sparsity_ratio_per_op": min_sparsity_ratio_per_op,
89-
"max_sparsity_ratio_per_op": max_sparsity_ratio_per_op,
90-
"sparsity_decay_type": sparsity_decay_type,
91-
"pruning_op_types": pruning_op_types,
92-
}
93-
)
94-
self._weight_compression.update(kwargs)
25+
from neural_compressor.utils import logger
26+
from neural_compressor.utils.utility import DotDict
9527

96-
@property
97-
def weight_compression(self):
98-
"""Get weight_compression."""
99-
return self._weight_compression
28+
from ...config import WeightPruningConfig as WeightPruningConf
29+
from ...utils.utility import LazyImport
10030

101-
@weight_compression.setter
102-
def weight_compression(self, weight_compression):
103-
"""Set weight_compression."""
104-
self._weight_compression = weight_compression
31+
torch = LazyImport("torch")
32+
nn = LazyImport("torch.nn")
33+
F = LazyImport("torch.nn.functional")
34+
tf = LazyImport("tensorflow")
10535

10636

10737
def get_sparsity_ratio(pruners, model):
@@ -423,14 +353,10 @@ def check_key_validity_prunerv2(template_config, usr_cfg_dict):
423353
for obj in user_config:
424354
if isinstance(obj, dict):
425355
check_key_validity_dict(template_config, obj)
426-
elif isinstance(obj, PrunerV2):
427-
check_key_validity_prunerv2(template_config, obj)
428356

429357
# single pruner, weightconfig or yaml
430358
elif isinstance(user_config, dict):
431359
check_key_validity_dict(template_config, user_config)
432-
elif isinstance(user_config, PrunerV2):
433-
check_key_validity_prunerv2(template_config, user_config)
434360
return
435361

436362

@@ -470,7 +396,7 @@ def process_and_check_config(val):
470396
default_config.update(default_global_config)
471397
default_config.update(default_local_config)
472398
default_config.update(params_default_config)
473-
if isinstance(val, WeightPruningConfig) or isinstance(val, WeightPruningConf):
399+
if isinstance(val, WeightPruningConf):
474400
global_configs = val.weight_compression
475401
pruning_configs = val.pruning_configs
476402
check_key_validity(default_config, pruning_configs)
@@ -494,21 +420,7 @@ def process_config(config):
494420
Returns:
495421
A config dict object.
496422
"""
497-
if isinstance(config, str):
498-
try:
499-
with open(config, "r") as f:
500-
content = f.read()
501-
val = yaml.safe_load(content)
502-
##schema.validate(val)
503-
return process_and_check_config(val)
504-
except FileNotFoundError as f:
505-
logger.error("{}.".format(f))
506-
raise RuntimeError("The yaml file is not exist. Please check the file name or path.")
507-
except Exception as e:
508-
logger.error("{}.".format(e))
509-
raise RuntimeError("The yaml file format is not correct. Please refer to document.")
510-
511-
if isinstance(config, WeightPruningConfig) or isinstance(config, WeightPruningConf):
423+
if isinstance(config, WeightPruningConf):
512424
return process_and_check_config(config)
513425
else:
514426
assert False, f"not supported type {config}"
@@ -618,25 +530,6 @@ def parse_to_prune_tf(config, model):
618530
return new_modules
619531

620532

621-
def generate_pruner_config(info):
622-
"""Generate pruner config object from prune information.
623-
624-
Args:
625-
info: A dotdict that saves prune information.
626-
627-
Returns:
628-
pruner: A pruner config object.
629-
"""
630-
return Pruner(
631-
initial_sparsity=0,
632-
method=info.method,
633-
target_sparsity=info.target_sparsity,
634-
start_epoch=info.start_step,
635-
end_epoch=info.end_step,
636-
update_frequency=info.pruning_frequency,
637-
)
638-
639-
640533
def get_layers(model):
641534
"""Get each layer's name and its module.
642535

0 commit comments

Comments (0)