File tree: 5 files changed (+5, -5 lines)

examples/huggingface/pytorch
  code-generation/quantization
  text-generation/quantization
File 1 of 5 (Dockerfile):

@@ -61,7 +61,7 @@ ENV COMPOSE_DOCKER_CLI_BUILD=0
 # Install torch and intel-extension-for-pytorch 2.1
 RUN python3 -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
 RUN python3 -m pip install intel-extension-for-pytorch intel-extension-for-transformers optimum
-RUN python3 -m pip install git+https://github.com/huggingface/optimum-intel.git@f95dea1ae8966dee4d75d622e7b2468c514ba02d
+RUN python3 -m pip install git+https://github.com/huggingface/optimum-intel.git@50d867c13b22c22eda451ddb67bddb8159670f85
 RUN python3 -m pip install git+https://github.com/bigcode-project/bigcode-evaluation-harness@0d84db85f9ff971fa23a187a3347b7f59af288dc
 
 # Standard requirements
File 2 of 5 (requirements file):

@@ -10,6 +10,6 @@ transformers >= 4.35.0
 tiktoken #code_gen
 neural-compressor
 intel_extension_for_pytorch == 2.3.0
-optimum-intel
+git+https://github.com/huggingface/optimum-intel.git@50d867c13b22c22eda451ddb67bddb8159670f85
 auto-round == 0.2
 git+https://github.com/bigcode-project/bigcode-evaluation-harness@094c7cc197d13a53c19303865e2056f1c7488ac1
File 3 of 5 (requirements file):

@@ -7,7 +7,7 @@ sentencepiece != 0.1.92
 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 torch==2.1.0a0
 transformers
-optimum-intel
+git+https://github.com/huggingface/optimum-intel.git@50d867c13b22c22eda451ddb67bddb8159670f85
 bitsandbytes #baichuan
 transformers_stream_generator
 tiktoken #qwen
File 4 of 5 (requirements file):

@@ -7,7 +7,7 @@ sentencepiece != 0.1.92
 torch==2.3.0+cpu
 transformers==4.38.1
 intel_extension_for_pytorch==2.3.0
-optimum-intel
+git+https://github.com/huggingface/optimum-intel.git@50d867c13b22c22eda451ddb67bddb8159670f85
 bitsandbytes #baichuan
 transformers_stream_generator
 tiktoken #qwen
File 5 of 5 (requirements file):

@@ -7,6 +7,7 @@ datasets==2.16.1
 einops
 evaluate
 gguf
+git+https://github.com/huggingface/optimum-intel.git@50d867c13b22c22eda451ddb67bddb8159670f85
 git+https://github.com/intel/neural-compressor.git
 git+https://github.com/intel/neural-speed.git
 intel-extension-for-pytorch == 2.3.0
@@ -16,7 +17,6 @@
 nlpaug == 1.1.9
 onnx
 onnxruntime
-optimum-intel
 peft == 0.6.2
 py-cpuinfo
 sacremoses
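
Across all five files the change is the same: the unpinned optimum-intel requirement is dropped in favour of a pip VCS install pinned to commit 50d867c13b22c22eda451ddb67bddb8159670f85. A minimal sketch of how such a pin can be reproduced and checked locally, assuming a recent pip and git on PATH; the verification step is an illustration, not part of this change:

# Install optimum-intel from the pinned commit (same URL used throughout the diff)
python3 -m pip install git+https://github.com/huggingface/optimum-intel.git@50d867c13b22c22eda451ddb67bddb8159670f85

# pip records the VCS URL and resolved commit for such installs, so the pin
# appears in `pip freeze` as an "optimum-intel @ git+..." entry
python3 -m pip freeze | grep optimum-intel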