Commit 387b6ea by seanpedrickcase (1 parent: 9bd035b)

Updated package requirements
requirements.txt CHANGED
@@ -1,24 +1,24 @@
- pandas==2.3.0
- gradio==5.36.2
- transformers==4.53.2
- spaces==0.37.0
- boto3==1.39.4
- pyarrow==20.0.0
+ pandas==2.3.1
+ gradio==5.42.0
+ transformers==4.55.2
+ spaces==0.40.0
+ boto3==1.40.11
+ pyarrow==21.0.0
  openpyxl==3.1.5
  markdown==3.7
  tabulate==0.9.0
  lxml==5.3.0
- google-genai==1.21.1
+ google-genai==1.30.0
  html5lib==1.1
  beautifulsoup4==4.12.3
  rapidfuzz==3.13.0
  python-dotenv==1.1.0
  # Torch and Llama CPP Python
  # GPU
- # torch==2.6.0 --extra-index-url https://download.pytorch.org/whl/cu124
- # https://github.com/abetlen/llama-cpp-python/releases/download/v0.3.12-cu124/llama_cpp_python-0.3.12-cp310-cp310-linux_x86_64.whl # Specify exact llama_cpp for cuda compatibility on Hugging Face
+ # torch==2.6.0 --extra-index-url https://download.pytorch.org/whl/cu124 # Latest compatible with CUDA 12.4
+ # https://github.com/abetlen/llama-cpp-python/releases/download/v0.3.16-cu124/llama_cpp_python-0.3.16-cp310-cp310-linux_x86_64.whl # Specify exact llama_cpp for cuda compatibility on Hugging Face
  #
  # CPU only:
- torch==2.7.1 --extra-index-url https://download.pytorch.org/whl/cpu
- llama-cpp-python==0.3.12 - will work on Hugging Face spaces, but only CPU
+ torch==2.6.0 --extra-index-url https://download.pytorch.org/whl/cpu
+ llama-cpp-python==0.3.16 # will work on Hugging Face spaces, but only CPU
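
As a rough post-install sanity check for the CPU-only pins above (expected values are assumptions based on the pins; the CPU wheel usually reports a +cpu suffix):

python -c "import torch; print(torch.__version__, torch.cuda.is_available())"   # expect roughly '2.6.0+cpu False'
python -c "import llama_cpp; print(llama_cpp.__version__)"                      # expect '0.3.16'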
 
requirements_aws.txt CHANGED
@@ -1,16 +1,17 @@
- pandas==2.3.0
- gradio==5.36.2
- transformers==4.53.2
- spaces==0.37.0
- boto3==1.39.4
- pyarrow==20.0.0
+ pandas==2.3.1
+ gradio==5.42.0
+ transformers==4.55.2
+ spaces==0.40.0
+ boto3==1.40.11
+ pyarrow==21.0.0
  openpyxl==3.1.5
  markdown==3.7
  tabulate==0.9.0
  lxml==5.3.0
- google-genai==1.21.1
+ google-genai==1.30.0
  html5lib==1.1
  beautifulsoup4==4.12.3
  rapidfuzz==3.13.0
  python-dotenv==1.1.0
- llama-cpp-python==0.3.12
+ torch==2.6.0 --extra-index-url https://download.pytorch.org/whl/cu124 # Latest compatible with CUDA 12.4
+ llama-cpp-python==0.3.16
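
Note the trailing --extra-index-url on the torch line: pip treats it as an additional index to consult when resolving that requirement. As a standalone command, the line is equivalent to:

pip install torch==2.6.0 --extra-index-url https://download.pytorch.org/whl/cu124
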
requirements_cpu_win.txt CHANGED
@@ -1,18 +1,18 @@
- pandas==2.3.0
- gradio==5.36.2
- transformers==4.53.2
- spaces==0.37.0
- boto3==1.39.4
- pyarrow==20.0.0
+ pandas==2.3.1
+ gradio==5.42.0
+ transformers==4.55.2
+ spaces==0.40.0
+ boto3==1.40.11
+ pyarrow==21.0.0
  openpyxl==3.1.5
  markdown==3.7
  tabulate==0.9.0
  lxml==5.3.0
- google-genai==1.21.1
+ google-genai==1.30.0
  html5lib==1.1
  beautifulsoup4==4.12.3
  rapidfuzz==3.13.0
  python-dotenv==1.1.0
- # Following will only work with Gemma 2
- torch==2.7.1 --extra-index-url https://download.pytorch.org/whl/cpu
+ # The following llama-cpp-python release is the latest with a prebuilt wheel for Windows; it will only work with Gemma 2
+ torch==2.6.0 --extra-index-url https://download.pytorch.org/whl/cpu # CPU-only build
  https://github.com/abetlen/llama-cpp-python/releases/download/v0.3.2/llama_cpp_python-0.3.2-cp311-cp311-win_amd64.whl
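
The last line pins a wheel by direct URL; the cp311 tag means it installs only under CPython 3.11. Installed by hand, it would be:

pip install https://github.com/abetlen/llama-cpp-python/releases/download/v0.3.2/llama_cpp_python-0.3.2-cp311-cp311-win_amd64.whl
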
requirements_gpu.txt CHANGED
@@ -1,23 +1,24 @@
- pandas==2.3.0
- gradio==5.36.2
- transformers==4.53.2
- spaces==0.37.0
- boto3==1.39.4
- pyarrow==20.0.0
+ pandas==2.3.1
+ gradio==5.42.0
+ transformers==4.55.2
+ spaces==0.40.0
+ boto3==1.40.11
+ pyarrow==21.0.0
  openpyxl==3.1.5
  markdown==3.7
  tabulate==0.9.0
  lxml==5.3.0
- google-genai==1.21.1
+ google-genai==1.30.0
  html5lib==1.1
  beautifulsoup4==4.12.3
  rapidfuzz==3.13.0
  python-dotenv==1.1.0
  #
  # Torch and Llama CPP Python
- torch==2.6.0 --extra-index-url https://download.pytorch.org/whl/cu124
+ torch==2.6.0 --extra-index-url https://download.pytorch.org/whl/cu124 # Latest compatible with CUDA 12.4
  # For Linux:
- https://github.com/abetlen/llama-cpp-python/releases/download/v0.3.12-cu124/llama_cpp_python-0.3.12-cp311-cp311-linux_x86_64.whl
- # For Windows, or if above doesn't work. See 'windows_install_llama-cpp-python.txt' if you have trouble
- # llama-cpp-python==0.3.12 -C cmake.args="-DGGML_CUDA=on"
+ # https://github.com/abetlen/llama-cpp-python/releases/download/v0.3.16-cu124/llama_cpp_python-0.3.16-cp311-cp311-linux_x86_64.whl
+ # For Windows:
+ llama-cpp-python==0.3.16 -C cmake.args="-DGGML_CUDA=on"
+ # If the above doesn't work on Windows, see 'windows_install_llama-cpp-python.txt'
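
A quick check that the CUDA-enabled pins took effect (a sketch, not part of the requirements; llama_supports_gpu_offload is assumed to be exposed by this llama-cpp-python release):

python -c "import torch; print(torch.cuda.is_available())"                                          # expect True with a working cu124 install
python -c "from llama_cpp import llama_supports_gpu_offload; print(llama_supports_gpu_offload())"   # expect True if -DGGML_CUDA=on was used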
 
windows_install_llama-cpp-python.txt CHANGED
@@ -3,12 +3,12 @@
  set PKG_CONFIG_PATH=C:\<path-to-openblas>\OpenBLAS\lib\pkgconfig # Set this in environment variables
 
 
- pip install llama-cpp-python==0.3.12 --force-reinstall --verbose --no-cache-dir -Ccmake.args="-DGGML_BLAS=ON;-DGGML_BLAS_VENDOR=OpenBLAS;-DBLAS_INCLUDE_DIRS=C:/<path-to-openblas>/OpenBLAS/include;-DBLAS_LIBRARIES=C:/<path-to-openblas>/OpenBLAS/lib/libopenblas.lib"
+ pip install llama-cpp-python==0.3.16 --force-reinstall --verbose --no-cache-dir -Ccmake.args="-DGGML_BLAS=ON;-DGGML_BLAS_VENDOR=OpenBLAS;-DBLAS_INCLUDE_DIRS=C:/<path-to-openblas>/OpenBLAS/include;-DBLAS_LIBRARIES=C:/<path-to-openblas>/OpenBLAS/lib/libopenblas.lib"
  ---
 
  # With CUDA
 
- pip install llama-cpp-python==0.3.12 --force-reinstall --no-cache-dir --verbose -C cmake.args="-DGGML_CUDA=on"
+ pip install llama-cpp-python==0.3.16 --force-reinstall --no-cache-dir --verbose -C cmake.args="-DGGML_CUDA=on"
 
 
  ---
@@ -99,11 +99,11 @@ Open the "Developer Command Prompt for VS" from your Start Menu. This is importa
  set PKG_CONFIG_PATH=C:\<path-to-openblas>\OpenBLAS\lib\pkgconfig # Set this in environment variables
 
 
- pip install llama-cpp-python==0.3.9 --force-reinstall --verbose --no-cache-dir -Ccmake.args="-DGGML_BLAS=ON;-DGGML_BLAS_VENDOR=OpenBLAS;-DBLAS_INCLUDE_DIRS=C:/<path-to-openblas>/OpenBLAS/include;-DBLAS_LIBRARIES=C:/<path-to-openblas>/OpenBLAS/lib/libopenblas.lib"
+ pip install llama-cpp-python==0.3.16 --force-reinstall --verbose --no-cache-dir -Ccmake.args="-DGGML_BLAS=ON;-DGGML_BLAS_VENDOR=OpenBLAS;-DBLAS_INCLUDE_DIRS=C:/<path-to-openblas>/OpenBLAS/include;-DBLAS_LIBRARIES=C:/<path-to-openblas>/OpenBLAS/lib/libopenblas.lib"
 
  ## With Cuda
 
- ### Make sure you are using the x64 version of Developer command tools ###
+ ### Make sure you are using the x64 version of Developer command tools, e.g. 'x64 Native Tools Command Prompt for VS 2022' ###
 
  Use NVIDIA GPU (cuBLAS): If you have an NVIDIA GPU, using cuBLAS is often easier because the CUDA Toolkit installer handles most of the setup.
 
@@ -114,6 +114,6 @@ Run the install command specifying cuBLAS:
 
  set PKG_CONFIG_PATH=C:\<path-to-openblas>\OpenBLAS\lib\pkgconfig # Set this in environment variables
 
- pip install llama-cpp-python==0.3.12 --force-reinstall --no-cache-dir --verbose -C cmake.args="-DGGML_CUDA=on"
+ pip install llama-cpp-python==0.3.16 --force-reinstall --no-cache-dir --verbose -C cmake.args="-DGGML_CUDA=on"
 
 
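
After either rebuild, pip's --verbose output includes the CMake configure log, where the GGML_BLAS or GGML_CUDA flags should appear. As a final smoke test (the model path is a hypothetical placeholder for any local GGUF file):

python -c "from llama_cpp import Llama; Llama(model_path='model.gguf', n_gpu_layers=-1, verbose=True)"   # layer offload info appears in the log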