15 files changed: +27 -7 lines changed

@@ -20,6 +20,7 @@ conda activate llm
 # install the latest ipex-llm nightly build with 'all' option
 pip install --pre --upgrade ipex-llm[all] --extra-index-url https://download.pytorch.org/whl/cpu
 
+pip install "transformers<4.37.0"
 pip install accelerate tiktoken einops transformers_stream_generator==0.0.4 scipy torchvision pillow tensorboard matplotlib # additional package required for Qwen-VL-Chat to conduct generation
 
 ```
@@ -32,6 +33,7 @@ conda activate llm
 
 pip install --pre --upgrade ipex-llm[all]
 
+pip install "transformers<4.37.0"
 pip install accelerate tiktoken einops transformers_stream_generator==0.0.4 scipy torchvision pillow tensorboard matplotlib
 
 ```
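The Qwen-VL-Chat and Qwen-7B-Chat READMEs in this diff all add the same pin, `pip install "transformers<4.37.0"`, ahead of the other dependencies. If you want to confirm the pin actually took effect after running the commands above, a minimal check is the sketch below (not part of the diff; the expected version is an assumption about what pip resolves under the `<4.37.0` constraint):

```bash
# Sanity check (sketch, not part of this PR): confirm the installed
# transformers release satisfies the new "<4.37.0" pin.
python -c "import transformers; print(transformers.__version__)"
# Expected: typically a 4.36.x (or earlier) version string.
```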
@@ -22,6 +22,8 @@ conda activate llm
 
 # install the latest ipex-llm nightly build with 'all' option
 pip install --pre --upgrade ipex-llm[all] --extra-index-url https://download.pytorch.org/whl/cpu
+
+pip install "transformers<4.37.0"
 pip install tiktoken einops transformers_stream_generator # additional package required for Qwen-7B-Chat to conduct generation
 ```
 
@@ -32,6 +34,8 @@ conda create -n llm python=3.11
 conda activate llm
 
 pip install --pre --upgrade ipex-llm[all]
+
+pip install "transformers<4.37.0"
 pip install tiktoken einops transformers_stream_generator
 ```
 
@@ -19,6 +19,8 @@ conda activate llm
 
 # install the latest ipex-llm nightly build with 'all' option
 pip install --pre --upgrade ipex-llm[all] --extra-index-url https://download.pytorch.org/whl/cpu
+
+pip install "transformers<4.37.0"
 pip install accelerate tiktoken einops transformers_stream_generator==0.0.4 scipy torchvision pillow tensorboard matplotlib # additional package required for Qwen-VL-Chat to conduct generation
 ```
 
@@ -29,6 +31,8 @@ conda create -n llm python=3.11
 conda activate llm
 
 pip install --pre --upgrade ipex-llm[all]
+
+pip install "transformers<4.37.0"
 pip install accelerate tiktoken einops transformers_stream_generator==0.0.4 scipy torchvision pillow tensorboard matplotlib
 ```
 
@@ -15,6 +15,7 @@ conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 
+pip install "transformers<4.37.0"
 pip install tiktoken einops transformers_stream_generator # additional package required for Qwen-7B-Chat to conduct generation
 ```
 
@@ -27,6 +28,7 @@ conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 
+pip install "transformers<4.37.0"
 pip install tiktoken einops transformers_stream_generator # additional package required for Qwen-7B-Chat to conduct generation
 ```
 
@@ -15,6 +15,7 @@ conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 
+pip install "transformers<4.37.0"
 pip install accelerate tiktoken einops transformers_stream_generator==0.0.4 scipy torchvision pillow tensorboard matplotlib # additional package required for Qwen-VL-Chat to conduct generation
 ```
 
@@ -27,6 +28,7 @@ conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 
+pip install "transformers<4.37.0"
 pip install accelerate tiktoken einops transformers_stream_generator==0.0.4 scipy torchvision pillow tensorboard matplotlib # additional package required for Qwen-VL-Chat to conduct generation
 ```
 
@@ -17,6 +17,7 @@ conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 
+pip install transformers==4.36.2
 pip install librosa soundfile datasets
 pip install accelerate
 pip install SpeechRecognition sentencepiece colorama
@@ -33,6 +34,7 @@ conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 
+pip install transformers==4.36.2
 pip install librosa soundfile datasets
 pip install accelerate
 pip install SpeechRecognition sentencepiece colorama
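The speech-related examples pin an exact release, `transformers==4.36.2`, rather than a range. If any of the follow-up installs were to pull in a package that requires a newer transformers, it could silently override that pin; one way to guard against this is pip's constraints-file mechanism, shown in the sketch below (the `constraints.txt` filename is illustrative and not part of this PR):

```bash
# Sketch (not part of this PR): hold transformers at the pinned release while
# installing the remaining audio dependencies, using a pip constraints file.
echo "transformers==4.36.2" > constraints.txt   # illustrative filename
pip install -c constraints.txt librosa soundfile datasets accelerate
pip install -c constraints.txt SpeechRecognition sentencepiece colorama
```

With `-c constraints.txt`, pip refuses to resolve transformers to anything other than 4.36.2 during those install commands.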
@@ -16,6 +16,7 @@ conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 
+pip install transformers==4.36.2
 pip install datasets soundfile librosa # required by audio processing
 ```
 
@@ -28,6 +29,7 @@ conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 
+pip install transformers==4.36.2
 pip install datasets soundfile librosa # required by audio processing
 ```
 
@@ -16,7 +16,6 @@ conda activate llm
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 
 pip install einops # install dependencies required by llava
-pip install transformers==4.36.2
 
 git clone https://github.com/haotian-liu/LLaVA.git # clone the llava library
 cp generate.py ./LLaVA/ # copy our example to the LLaVA folder
@@ -34,7 +33,6 @@ conda activate llm
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 
 pip install einops # install dependencies required by llava
-pip install transformers==4.36.2
 
 git clone https://github.com/haotian-liu/LLaVA.git # clone the llava library
 copy generate.py .\LLaVA\ # copy our example to the LLaVA folder
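With the explicit `pip install transformers==4.36.2` line removed from both the Linux and Windows LLaVA instructions, the installed transformers version is now whatever ipex-llm and LLaVA's own requirements resolve to. A quick way to see what ended up installed, and whether dropping the pin introduced any dependency conflicts, is the sketch below (not part of the diff):

```bash
# Sketch (not part of this PR): inspect the resolved transformers version and
# check for broken dependency constraints after the LLaVA install steps.
pip show transformers
pip check
```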
@@ -15,6 +15,7 @@ conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 
+pip install "transformers<4.37.0"
 pip install accelerate tiktoken einops transformers_stream_generator==0.0.4 scipy torchvision pillow tensorboard matplotlib # additional package required for Qwen-VL-Chat to conduct generation
 ```
 
@@ -27,6 +28,7 @@ conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 
+pip install "transformers<4.37.0"
 pip install accelerate tiktoken einops transformers_stream_generator==0.0.4 scipy torchvision pillow tensorboard matplotlib # additional package required for Qwen-VL-Chat to conduct generation
 ```
 
@@ -15,6 +15,7 @@ conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 
+pip install transformers==4.36.2
 pip install "datasets<2.18" soundfile # additional package required for SpeechT5 to conduct generation
 ```
 
@@ -27,6 +28,7 @@ conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 
+pip install transformers==4.36.2
 pip install "datasets<2.18" soundfile # additional package required for SpeechT5 to conduct generation
 ```
 