Skip to content
This repository was archived by the owner on Oct 25, 2024. It is now read-only.

Commit c576211

Browse files
authored
fix optimum-intel version (#1654)
Signed-off-by: changwangss <[email protected]>
1 parent 590024d commit c576211

File tree

5 files changed

+5
-5
lines changed

5 files changed

+5
-5
lines changed

examples/huggingface/pytorch/code-generation/quantization/Dockerfile-multiple

+1-1
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ ENV COMPOSE_DOCKER_CLI_BUILD=0
6161
# Install torch and intel-extension-for-pytorch 2.1
6262
RUN python3 -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
6363
RUN python3 -m pip install intel-extension-for-pytorch intel-extension-for-transformers optimum
64-
RUN python3 -m pip install git+https://github.com/huggingface/optimum-intel.git@f95dea1ae8966dee4d75d622e7b2468c514ba02d
64+
RUN python3 -m pip install git+https://github.com/huggingface/optimum-intel.git@50d867c13b22c22eda451ddb67bddb8159670f85
6565
RUN python3 -m pip install git+https://github.com/bigcode-project/bigcode-evaluation-harness@0d84db85f9ff971fa23a187a3347b7f59af288dc
6666

6767
# Standard requirements

examples/huggingface/pytorch/code-generation/quantization/requirements.txt

+1-1
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,6 @@ transformers >= 4.35.0
1010
tiktoken #code_gen
1111
neural-compressor
1212
intel_extension_for_pytorch==2.3.0
13-
optimum-intel
13+
git+https://github.com/huggingface/optimum-intel.git@50d867c13b22c22eda451ddb67bddb8159670f85
1414
auto-round==0.2
1515
git+https://github.com/bigcode-project/bigcode-evaluation-harness@094c7cc197d13a53c19303865e2056f1c7488ac1

examples/huggingface/pytorch/text-generation/quantization/requirements_GPU.txt

+1-1
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ sentencepiece != 0.1.92
77
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
88
torch==2.1.0a0
99
transformers
10-
optimum-intel
10+
git+https://github.com/huggingface/optimum-intel.git@50d867c13b22c22eda451ddb67bddb8159670f85
1111
bitsandbytes #baichuan
1212
transformers_stream_generator
1313
tiktoken #qwen

examples/huggingface/pytorch/text-generation/quantization/requirements_sq.txt

+1-1
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ sentencepiece != 0.1.92
77
torch==2.3.0+cpu
88
transformers==4.38.1
99
intel_extension_for_pytorch==2.3.0
10-
optimum-intel
10+
git+https://github.com/huggingface/optimum-intel.git@50d867c13b22c22eda451ddb67bddb8159670f85
1111
bitsandbytes #baichuan
1212
transformers_stream_generator
1313
tiktoken #qwen

tests/requirements.txt

+1-1
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ datasets==2.16.1
77
einops
88
evaluate
99
gguf
10+
git+https://github.com/huggingface/optimum-intel.git@50d867c13b22c22eda451ddb67bddb8159670f85
1011
git+https://github.com/intel/neural-compressor.git
1112
git+https://github.com/intel/neural-speed.git
1213
intel-extension-for-pytorch==2.3.0
@@ -16,7 +17,6 @@ mlflow
1617
nlpaug==1.1.9
1718
onnx
1819
onnxruntime
19-
optimum-intel
2020
peft==0.6.2
2121
py-cpuinfo
2222
sacremoses

0 commit comments

Comments (0)