morriszms committed on
Commit
b5fbc6b
·
verified ·
1 Parent(s): bc6fc44

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ Llama2-MedTuned-7b-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ Llama2-MedTuned-7b-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ Llama2-MedTuned-7b-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ Llama2-MedTuned-7b-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ Llama2-MedTuned-7b-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ Llama2-MedTuned-7b-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ Llama2-MedTuned-7b-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ Llama2-MedTuned-7b-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ Llama2-MedTuned-7b-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ Llama2-MedTuned-7b-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ Llama2-MedTuned-7b-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ Llama2-MedTuned-7b-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
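These attribute lines are what route the new `.gguf` weights through Git LFS rather than plain Git. The Hub's `upload_folder` adds them automatically; as an illustrative sketch only, the manual equivalent with the `git-lfs` CLI would be:

```shell
# Appends a line like the ones above, e.g.
# "*.gguf filter=lfs diff=lfs merge=lfs -text", to .gitattributes.
git lfs track "*.gguf"

# Confirm the new rule was recorded.
tail -n 1 .gitattributes
```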
Llama2-MedTuned-7b-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a197c67e76520966114f22142ab82fc4b742dfa009a7d0af1a20c0ff6b8b6b1
+ size 2532864128
Llama2-MedTuned-7b-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf3ca24534715a7e5dc69aebc0e14c5e77d5c569e640ff64d119e67e0e7b4cfe
+ size 3597111424
Llama2-MedTuned-7b-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83afec8d42207bac7f15f4f258ba42ff914aacca15f71e9e6f96f89e83a294a6
+ size 3298005120
Llama2-MedTuned-7b-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ed961e310484bf64ddb0e8e288c4182c0fb67d421933902443108cea4ed4537
+ size 2948305024
Llama2-MedTuned-7b-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aea5cfd9d7fcd115c38aef9aa3d1d2beb5531b8ccb64f4f9fef98f6c28069399
+ size 3825807488
Llama2-MedTuned-7b-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fbeb77ffab14ae1d63b085be0b152b2a67f963f53c2ecbf323b30d5884c09ff
+ size 4081004672
Llama2-MedTuned-7b-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:adaa975542517b79e53c9c9baaab21034fc7b03c5c1ed0f62b6d53d671aa2406
+ size 3856740480
Llama2-MedTuned-7b-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:224ab60b83e238f91e05c4f08f63cdae9085ceeec3cb819db1930b5ddbd19cd7
+ size 4651692160
Llama2-MedTuned-7b-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65eab2cbb90debef4b676007fe5e57ced650640df620b833cd20f03c2bc3b96d
+ size 4783157376
Llama2-MedTuned-7b-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fdfb3e7deb7e93c3701d527d6ee49cc2613463c051cb0bd6d2511720a4ebfc0a
+ size 4651692160
Llama2-MedTuned-7b-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:076b26e7754085c0c7cbeb38785911573451bd4fba1be4bc6287bcebad0e4f2e
+ size 5529194624
Llama2-MedTuned-7b-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bd7d65aa674b53cb1088b944d90b74d943ecc263bbfd873b7673d834d3de802
+ size 7161090176
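Each `ADDED` entry above is not the model weights themselves but a Git LFS pointer file: a three-line stub recording the spec version, the SHA-256 object ID, and the byte size of the actual blob. As a hedged sketch of working with these pointers directly (assuming the `git-lfs` CLI is installed):

```shell
# Clone without downloading any large blobs; the working tree then holds
# only the small version/oid/size pointer stubs shown above.
GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/tensorblock/nlpie_Llama2-MedTuned-7b-GGUF
cd nlpie_Llama2-MedTuned-7b-GGUF

# Materialize a single quantization on demand.
git lfs pull --include "Llama2-MedTuned-7b-Q4_K_M.gguf"
```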
README.md ADDED
@@ -0,0 +1,121 @@
+ ---
+ title: Llama2-MedTuned-7b
+ emoji: 🧬
+ colorFrom: blue
+ colorTo: green
+ sdk: static
+ pinned: false
+ license: apache-2.0
+ tags:
+ - biomedical
+ - clinical
+ - medical
+ - TensorBlock
+ - GGUF
+ base_model: nlpie/Llama2-MedTuned-7b
+ ---
+
+ <div style="width: auto; margin-left: auto; margin-right: auto">
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
+ </div>
+ <div style="display: flex; justify-content: space-between; width: 100%;">
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
+ </p>
+ </div>
+ </div>
+
+ ## nlpie/Llama2-MedTuned-7b - GGUF
+
+ This repo contains GGUF format model files for [nlpie/Llama2-MedTuned-7b](https://huggingface.co/nlpie/Llama2-MedTuned-7b).
+
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b5165](https://github.com/ggml-org/llama.cpp/commit/1d735c0b4fa0551c51c2f4ac888dd9a01f447985).
+
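+ As a quick smoke test, a sketch along these lines should work with a llama.cpp build at or after that commit (`llama-cli` and its flags are standard llama.cpp; the chosen quantization and the prompt are placeholders):
+
+ ```shell
+ # Assumes the Q4_K_M file has already been downloaded into the current directory.
+ ./llama-cli -m Llama2-MedTuned-7b-Q4_K_M.gguf \
+   -p "What are the common symptoms of type 2 diabetes?" \
+   -n 128
+ ```
+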
+ ## Our projects
+ <table border="1" cellspacing="0" cellpadding="10">
+ <tr>
+ <th style="font-size: 25px;">Awesome MCP Servers</th>
+ <th style="font-size: 25px;">TensorBlock Studio</th>
+ </tr>
+ <tr>
+ <th><img src="https://imgur.com/2Xov7B7.jpeg" alt="Project A" width="450"/></th>
+ <th><img src="https://imgur.com/pJcmF5u.jpeg" alt="Project B" width="450"/></th>
+ </tr>
+ <tr>
+ <th>A comprehensive collection of Model Context Protocol (MCP) servers.</th>
+ <th>A lightweight, open, and extensible multi-LLM interaction studio.</th>
+ </tr>
+ <tr>
+ <th>
+ <a href="https://github.com/TensorBlock/awesome-mcp-servers" target="_blank" style="
+ display: inline-block;
+ padding: 8px 16px;
+ background-color: #FF7F50;
+ color: white;
+ text-decoration: none;
+ border-radius: 6px;
+ font-weight: bold;
+ font-family: sans-serif;
+ ">👀 See what we built 👀</a>
+ </th>
+ <th>
+ <a href="https://github.com/TensorBlock/TensorBlock-Studio" target="_blank" style="
+ display: inline-block;
+ padding: 8px 16px;
+ background-color: #FF7F50;
+ color: white;
+ text-decoration: none;
+ border-radius: 6px;
+ font-weight: bold;
+ font-family: sans-serif;
+ ">👀 See what we built 👀</a>
+ </th>
+ </tr>
+ </table>
+
+ ## Prompt template
+
+ ```
+ Unable to determine prompt format automatically. Please check the original model repository for the correct prompt format.
+ ```
+
+ ## Model file specification
+
+ | Filename | Quant type | File Size | Description |
+ | -------- | ---------- | --------- | ----------- |
+ | [Llama2-MedTuned-7b-Q2_K.gguf](https://huggingface.co/tensorblock/nlpie_Llama2-MedTuned-7b-GGUF/blob/main/Llama2-MedTuned-7b-Q2_K.gguf) | Q2_K | 2.533 GB | smallest, significant quality loss - not recommended for most purposes |
+ | [Llama2-MedTuned-7b-Q3_K_S.gguf](https://huggingface.co/tensorblock/nlpie_Llama2-MedTuned-7b-GGUF/blob/main/Llama2-MedTuned-7b-Q3_K_S.gguf) | Q3_K_S | 2.948 GB | very small, high quality loss |
+ | [Llama2-MedTuned-7b-Q3_K_M.gguf](https://huggingface.co/tensorblock/nlpie_Llama2-MedTuned-7b-GGUF/blob/main/Llama2-MedTuned-7b-Q3_K_M.gguf) | Q3_K_M | 3.298 GB | very small, high quality loss |
+ | [Llama2-MedTuned-7b-Q3_K_L.gguf](https://huggingface.co/tensorblock/nlpie_Llama2-MedTuned-7b-GGUF/blob/main/Llama2-MedTuned-7b-Q3_K_L.gguf) | Q3_K_L | 3.597 GB | small, substantial quality loss |
+ | [Llama2-MedTuned-7b-Q4_0.gguf](https://huggingface.co/tensorblock/nlpie_Llama2-MedTuned-7b-GGUF/blob/main/Llama2-MedTuned-7b-Q4_0.gguf) | Q4_0 | 3.826 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
+ | [Llama2-MedTuned-7b-Q4_K_S.gguf](https://huggingface.co/tensorblock/nlpie_Llama2-MedTuned-7b-GGUF/blob/main/Llama2-MedTuned-7b-Q4_K_S.gguf) | Q4_K_S | 3.857 GB | small, greater quality loss |
+ | [Llama2-MedTuned-7b-Q4_K_M.gguf](https://huggingface.co/tensorblock/nlpie_Llama2-MedTuned-7b-GGUF/blob/main/Llama2-MedTuned-7b-Q4_K_M.gguf) | Q4_K_M | 4.081 GB | medium, balanced quality - recommended |
+ | [Llama2-MedTuned-7b-Q5_0.gguf](https://huggingface.co/tensorblock/nlpie_Llama2-MedTuned-7b-GGUF/blob/main/Llama2-MedTuned-7b-Q5_0.gguf) | Q5_0 | 4.652 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
+ | [Llama2-MedTuned-7b-Q5_K_S.gguf](https://huggingface.co/tensorblock/nlpie_Llama2-MedTuned-7b-GGUF/blob/main/Llama2-MedTuned-7b-Q5_K_S.gguf) | Q5_K_S | 4.652 GB | large, low quality loss - recommended |
+ | [Llama2-MedTuned-7b-Q5_K_M.gguf](https://huggingface.co/tensorblock/nlpie_Llama2-MedTuned-7b-GGUF/blob/main/Llama2-MedTuned-7b-Q5_K_M.gguf) | Q5_K_M | 4.783 GB | large, very low quality loss - recommended |
+ | [Llama2-MedTuned-7b-Q6_K.gguf](https://huggingface.co/tensorblock/nlpie_Llama2-MedTuned-7b-GGUF/blob/main/Llama2-MedTuned-7b-Q6_K.gguf) | Q6_K | 5.529 GB | very large, extremely low quality loss |
+ | [Llama2-MedTuned-7b-Q8_0.gguf](https://huggingface.co/tensorblock/nlpie_Llama2-MedTuned-7b-GGUF/blob/main/Llama2-MedTuned-7b-Q8_0.gguf) | Q8_0 | 7.161 GB | very large, extremely low quality loss - not recommended |
+
+
+ ## Downloading instructions
+
+ ### Command line
+
+ First, install the Hugging Face CLI:
+
+ ```shell
+ pip install -U "huggingface_hub[cli]"
+ ```
+
+ Then, download an individual model file to a local directory:
+
+ ```shell
+ huggingface-cli download tensorblock/nlpie_Llama2-MedTuned-7b-GGUF --include "Llama2-MedTuned-7b-Q2_K.gguf" --local-dir MY_LOCAL_DIR
+ ```
+
+ If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:
+
+ ```shell
+ huggingface-cli download tensorblock/nlpie_Llama2-MedTuned-7b-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
+ ```
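+
+ After downloading, you can optionally verify file integrity: the Git LFS pointer for each file (see the commit contents above) records its SHA-256, so the checksum of the downloaded file should match that `oid` value. A minimal sketch, assuming `sha256sum` is available:
+
+ ```shell
+ # Compare the output against the "oid sha256:..." line of the file's
+ # LFS pointer, e.g. 7a197c67... for Llama2-MedTuned-7b-Q2_K.gguf.
+ sha256sum MY_LOCAL_DIR/Llama2-MedTuned-7b-Q2_K.gguf
+ ```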