Update gguf/gguf_reader.py (#3)
Update gguf/gguf_reader.py (afc0147a3be536fb93b94e9717456b4247e0d9be)
Co-authored-by: Sean Nam <[email protected]>

gguf/gguf_reader.py  CHANGED  (+60 -11)
@@ -1,11 +1,11 @@
 #
-#
-# please see the files scripts/ for some fairly simple examples.
+# https://github.com/ggml-org/llama.cpp/blob/master/gguf-py/gguf/gguf_reader.py
 #
 from __future__ import annotations
 
 import logging
 import os
+import sys
 from collections import OrderedDict
 from typing import Any, Literal, NamedTuple, TypeVar, Union
 
@@ -15,7 +15,6 @@ import numpy.typing as npt
 from .quants import quant_shape_to_byte_shape
 
 if __name__ == "__main__":
-    import sys
     from pathlib import Path
 
     # Allow running file in package as a script.
@@ -28,6 +27,7 @@ from gguf.constants import (
     GGUF_VERSION,
     GGMLQuantizationType,
     GGUFValueType,
+    GGUFEndian,
 )
 
 logger = logging.getLogger(__name__)
@@ -53,6 +53,48 @@ class ReaderField(NamedTuple):
 
     types: list[GGUFValueType] = []
 
+    def contents(self, index_or_slice: int | slice = slice(None)) -> Any:
+        if self.types:
+            to_string = lambda x: str(x.tobytes(), encoding='utf-8') # noqa: E731
+            main_type = self.types[0]
+
+            if main_type == GGUFValueType.ARRAY:
+                sub_type = self.types[-1]
+
+                if sub_type == GGUFValueType.STRING:
+                    indices = self.data[index_or_slice]
+
+                    if isinstance(index_or_slice, int):
+                        return to_string(self.parts[indices])  # type: ignore
+                    else:
+                        return [to_string(self.parts[idx]) for idx in indices]  # type: ignore
+                else:
+                    # FIXME: When/if _get_field_parts() support multi-dimensional arrays, this must do so too
+
+                    # Check if it's unsafe to perform slice optimization on data
+                    # if any(True for idx in self.data if len(self.parts[idx]) != 1):
+                    #     optim_slice = slice(None)
+                    # else:
+                    #     optim_slice = index_or_slice
+                    #     index_or_slice = slice(None)
+
+                    # if isinstance(optim_slice, int):
+                    #     return self.parts[self.data[optim_slice]].tolist()[0]
+                    # else:
+                    #     return [pv for idx in self.data[optim_slice] for pv in self.parts[idx].tolist()][index_or_slice]
+
+                    if isinstance(index_or_slice, int):
+                        return self.parts[self.data[index_or_slice]].tolist()[0]
+                    else:
+                        return [pv for idx in self.data[index_or_slice] for pv in self.parts[idx].tolist()]
+
+            if main_type == GGUFValueType.STRING:
+                return to_string(self.parts[-1])
+            else:
+                return self.parts[-1].tolist()[0]
+
+        return None
+
 
 class ReaderTensor(NamedTuple):
     name: str
@@ -101,10 +143,19 @@ class GGUFReader:
             # If we get 0 here that means it's (probably) a GGUF file created for
             # the opposite byte order of the machine this script is running on.
             self.byte_order = 'S'
-            temp_version = temp_version.newbyteorder(self.byte_order)
+            temp_version = temp_version.view(temp_version.dtype.newbyteorder(self.byte_order))
         version = temp_version[0]
         if version not in READER_SUPPORTED_VERSIONS:
             raise ValueError(f'Sorry, file appears to be version {version} which we cannot handle')
+        if sys.byteorder == "little":
+            # Host is little endian
+            host_endian = GGUFEndian.LITTLE
+            swapped_endian = GGUFEndian.BIG
+        else:
+            # Sorry PDP or other weird systems that don't use BE or LE.
+            host_endian = GGUFEndian.BIG
+            swapped_endian = GGUFEndian.LITTLE
+        self.endianess = swapped_endian if self.byte_order == "S" else host_endian
         self.fields: OrderedDict[str, ReaderField] = OrderedDict()
         self.tensors: list[ReaderTensor] = []
         offs += self._push_field(ReaderField(offs, 'GGUF.version', [temp_version], [0], [GGUFValueType.UINT32]))
@@ -145,11 +196,8 @@ class GGUFReader:
         count = int(count)
         itemsize = int(np.empty([], dtype = dtype).itemsize)
         end_offs = offset + itemsize * count
-        return (
-            self.data[offset:end_offs]
-            .view(dtype = dtype)[:count]
-            .newbyteorder(override_order or self.byte_order)
-        )
+        arr = self.data[offset:end_offs].view(dtype=dtype)[:count]
+        return arr.view(arr.dtype.newbyteorder(self.byte_order if override_order is None else override_order))
 
     def _push_field(self, field: ReaderField, skip_sum: bool = False) -> int:
         if field.name in self.fields:
@@ -191,6 +239,7 @@ class GGUFReader:
             offs += int(alen.nbytes)
             aparts: list[npt.NDArray[Any]] = [raw_itype, alen]
             data_idxs: list[int] = []
+            # FIXME: Handle multi-dimensional arrays properly instead of flattening
            for idx in range(alen[0]):
                 curr_size, curr_parts, curr_idxs, curr_types = self._get_field_parts(offs, raw_itype[0])
                 if idx == 0:
@@ -201,7 +250,7 @@ class GGUFReader:
                 offs += curr_size
             return offs - orig_offs, aparts, data_idxs, types
         # We can't deal with this one.
-        raise ValueError('Unknown/unhandled field type {gtype}')
+        raise ValueError(f'Unknown/unhandled field type {gtype}')
 
     def _get_tensor_info_field(self, orig_offs: int) -> ReaderField:
         offs = orig_offs
@@ -314,4 +363,4 @@ class GGUFReader:
                 data = self._get(data_offs, item_type, item_count).reshape(np_dims),
                 field = field,
             ))
-        self.tensors = tensors
+        self.tensors = tensors
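
For reference, a minimal usage sketch of the reader after this change, exercising the new ReaderField.contents() helper and the endianess attribute set in __init__. This is not part of the commit: the import path assumes this repo's gguf package layout, and the file path and tokenizer key below are hypothetical examples.

# Usage sketch (not part of the commit); path and key names are hypothetical.
from gguf.gguf_reader import GGUFReader

reader = GGUFReader('model.gguf')  # hypothetical path to a GGUF file

# Endianness of the file as detected above (GGUFEndian.LITTLE or GGUFEndian.BIG).
print(reader.endianess)

# contents() decodes strings, scalars and (flattened) arrays into plain Python values.
for name, field in reader.fields.items():
    print(name, field.contents())

# For array-valued fields, contents() also accepts an index or a slice,
# e.g. the first five entries of a string-array field, if present:
tokens_field = reader.fields.get('tokenizer.ggml.tokens')
if tokens_field is not None:
    print(tokens_field.contents(slice(0, 5)))

Note that contents() currently flattens nested arrays, as flagged by the FIXME comments in the diff.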