|
|
|
import argparse

import numpy as np
import torch
from ml_dtypes import bfloat16
from tqdm import tqdm
from transformers import AutoTokenizer, AutoConfig

from axengine import InferenceSession
|
|
|
|
|
def post_process(data, topk=1, topp=0.9, temperature=0.6): |
|
def top_p(l: np.ndarray, p: float) -> np.ndarray: |
|
index = np.argsort(l) |
|
res = l.copy() |
|
sum_p = 0 |
|
for i in index[::-1]: |
|
if sum_p >= p: |
|
res[i] = 0 |
|
sum_p += res[i] |
|
return res / sum_p |
|
|
|
def softmax(l: np.ndarray) -> np.ndarray: |
|
l_max = l - l.max() |
|
l_exp = np.exp(l_max) |
|
res = l_exp / np.sum(l_exp) |
|
return res.astype(np.float64) |
|
|
|
r = data.astype(np.float32) |
|
r = r.flatten() |
|
|
|
candidate_index = np.argpartition(r, -topk)[-topk:] |
|
candidate_value = r[candidate_index] |
|
|
|
candidate_value /= temperature |
|
|
|
candidate_soft = softmax(candidate_value) |
|
|
|
candidate_soft = top_p(candidate_soft, topp) |
|
candidate_soft = candidate_soft.astype(np.float64) / candidate_soft.sum() |
|
pos = np.random.multinomial(1, candidate_soft).argmax() |
|
next_token = candidate_index[pos] |
|
return next_token, candidate_index, candidate_soft |
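
# A minimal usage sketch of post_process (illustrative values only): topk keeps
# the largest logits, temperature rescales them, softmax + top-p reshape the
# distribution, and one candidate index is sampled from it, e.g.
#   tok, cand, probs = post_process(np.array([0.1, 2.0, 1.5, -0.3], dtype=np.float32), topk=3)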
|
|
|
|
|
def generate_slice_indices(token_len, prefill=128, expand=512): |
|
remaining = max(0, token_len - prefill) |
|
extra_blocks = (remaining + expand - 1) // expand |
|
return list(range(extra_blocks + 1)) |
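
# Quick self-check of the slicing helper (toy numbers, for illustration only):
# a 130-token prompt with a 128-token prefill window needs one extra block,
# while a 100-token prompt fits entirely in the first block.
assert generate_slice_indices(130, prefill=128, expand=128) == [0, 1]
assert generate_slice_indices(100, prefill=128, expand=128) == [0]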
|
|
|
|
|
if __name__ == "__main__": |
|
|
|
prompt = None |
|
parser = argparse.ArgumentParser(description="Model configuration parameters") |
|
parser.add_argument("--hf_model", type=str, default="../qwen2.5_tokenizer", |
|
help="Path to HuggingFace model") |
|
parser.add_argument("--axmodel_path", type=str, default="../qwen2.5-1.5b-ctx-ax650", |
|
help="Path to save compiled axmodel of llama model") |
|
parser.add_argument("-q", "--question", type=str, default="Please calculate the derivative of the function y=2x^2.", |
|
help="Your question that you want to ask the model.") |
|
args = parser.parse_args() |
|
|
|
device = "cpu" |
|
hf_model_path = args.hf_model |
|
axmodel_path = args.axmodel_path |
|
|
|
cfg = AutoConfig.from_pretrained(hf_model_path, trust_remote_code=True) |
|
tokenizer = AutoTokenizer.from_pretrained(hf_model_path, trust_remote_code=True, use_fast=False) |
|
|
|
prompt = args.question |
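    # NOTE: the hard-coded prompts below override the --question argument;
    # comment them out to use your own question.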
|
|
|
print("注意: 这里预先设定了长 prompt 测试. ") |
|
|
|
    prompt = '''Can you translate English into Chinese? For example, this passage:
|
Once when I was six years old I saw a magnificent picture in a book, called True Stories from Nature, about the primeval forest. It was a picture of a boa constrictor in the act of swallowing an animal. Here is a copy of the drawing. |
|
In the book it said: "Boa constrictors swallow their prey whole, without chewing it. After that they are not able to move, and they sleep through the six months that they need for digestion." |
|
I pondered deeply, then, over the adventures of the jungle. And after some work with a colored pencil I succeeded in making my first drawing. My Drawing Number One. It looked like this: |
|
I showed my masterpiece to the grown-ups, and asked them whether the drawing frightened them. |
|
But they answered: "Frighten? Why should any one be frightened by a hat?" |
|
My drawing was not a picture of a hat. It was a picture of a boa constrictor digesting an elephant. But since the grown-ups were not able to understand it, I made another drawing: I drew the inside of the boa constrictor, so that the grown-ups could see it clearly. They always need to have things explained. My Drawing Number Two looked like this: |
|
The grown-ups' response, this time, was to advise me to lay aside my drawings of boa constrictors, whether from the inside or the outside, and devote myself instead to geography, history, arithmetic and grammar. That is why, at the age of six, I gave up what might have been a magnificent career as a painter. I had been disheartened by the failure of my Drawing Number One and my Drawing Number Two. Grown-ups never understand anything by themselves, and it is tiresome for children to be always and forever explaining things to them.
|
So then I chose another profession, and learned to pilot airplanes. |
|
''' |
|
|
|
|
|
|
|
prompt = "你知道 `床前明月光,疑是地上霜`是谁写的吗?" |
|
|
|
messages = [ |
|
{"role": "system", "content": "你的名字叫小智(allen), 你是一个人畜无害的 AI 助手. 深圳市今天(4月1日)阴天, 愚人节, 气温在 14°C 至 19°C 之间, 微风."}, |
|
{"role": "user", "content": prompt} |
|
] |
|
text = tokenizer.apply_chat_template( |
|
messages, |
|
tokenize=False, |
|
add_generation_prompt=True, |
|
) |
|
model_inputs = tokenizer([text], return_tensors="pt").to(device) |
|
token_ids = model_inputs.input_ids[0].cpu().numpy().tolist() |
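    # The token embedding table is exported as a separate .npy file; prompt token
    # embeddings are gathered on the CPU and fed to the first decoder layer.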
|
|
|
embeds = np.load(f"{axmodel_path}/model.embed_tokens.weight.npy") |
|
prefill_data = np.take(embeds, token_ids, axis=0) |
|
token_len = len(token_ids) |
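    # lastN below is the maximum number of cached positions (the KV-cache length)
    # supported by the compiled axmodel.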
|
|
|
|
|
|
lastN = 2559 |
|
|
|
kv_dim = cfg.hidden_size // cfg.num_attention_heads * cfg.num_key_value_heads |
|
k_caches = [ |
|
np.zeros((1, lastN, kv_dim), dtype=bfloat16) |
|
for _ in range(cfg.num_hidden_layers) |
|
] |
|
v_caches = [ |
|
np.zeros((1, lastN, kv_dim), dtype=bfloat16) |
|
for _ in range(cfg.num_hidden_layers) |
|
] |
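    # Each decoder layer is compiled into its own axmodel and gets its own
    # InferenceSession; a separate post model maps the final hidden state to
    # vocabulary logits.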
|
|
|
    prefill_decoder_sessions = []
|
|
|
for i in tqdm(range(cfg.num_hidden_layers), desc="Init InferenceSession"): |
|
session = InferenceSession( |
|
f"{axmodel_path}/qwen2_p128_l{i}_together.axmodel" |
|
) |
|
        prefill_decoder_sessions.append(session)
|
|
|
post_process_session = InferenceSession( |
|
f"{axmodel_path}/qwen2_post.axmodel" |
|
) |
|
print("model load done!") |
|
print("prefill token_len: ", token_len) |
|
|
|
""" |
|
Model input shape: |
|
- kv_cache: g1:[1, 1, hidden_size] -> g2:[1, kv_mask_expand_lens, kv_dim] -> g3:[1, kv_mask_expand_lens * 2, kv_dim] |
|
- mask: g1:[1, input_prefill_len, input_prefill_len] -> g2:[1, input_prefill_len, input_prefill_len+kv_mask_expand_lens] -> g3:[1, input_prefill_len, input_prefill_len+kv_mask_expand_lens*2] |
|
- indices: g1:[1, input_prefill_len] -> g2:[1, input_prefill_len] -> g3:[1, input_prefill_len] |
|
- input: g1:[1, input_prefill_len, hidden_size] -> g2:[1, input_prefill_len, hidden_size] -> g3:[1, input_prefill_len, hidden_size] |
|
""" |
|
|
|
input_prefill_len = 128 |
|
kv_mask_expand_len = 128 |
|
|
|
""" |
|
Model output shape: |
|
- kv_cache: g1:[1, input_prefill_len, kv_dim] -> g2:[1, input_prefill_len, kv_dim] -> g3:[1, input_prefill_len, kv_dim] |
|
- output: g1:[1, input_prefill_len, hidden_size] -> g2:[1, input_prefill_len, hidden_size] -> g3:[1, input_prefill_len, hidden_size] |
|
""" |
|
slice_indexs = generate_slice_indices(token_len, input_prefill_len, input_prefill_len) |
|
print(f"slice_indexs is {slice_indexs}") |
|
|
|
""" |
|
prefill |
|
""" |
|
if input_prefill_len > 0: |
|
for slice_index in slice_indexs: |
|
if slice_index == 0: |
|
current_slice_len = input_prefill_len |
|
else: |
|
current_slice_len = kv_mask_expand_len |
|
|
|
indices = np.array( |
|
list( |
|
range( |
|
slice_index * input_prefill_len, |
|
(slice_index + 1) * input_prefill_len, |
|
) |
|
), |
|
np.uint32, |
|
).reshape((1, input_prefill_len)) |
|
|
|
mask = ( |
|
np.zeros((1, input_prefill_len, current_slice_len * slice_index + input_prefill_len)) |
|
- 65536 |
|
) |
|
data = np.zeros((1, input_prefill_len, cfg.hidden_size)).astype(bfloat16) |
|
for i, t in enumerate( |
|
range( |
|
slice_index * input_prefill_len, |
|
(slice_index + 1) * input_prefill_len, |
|
) |
|
): |
|
if t < len(token_ids): |
|
mask[:, i, : slice_index * input_prefill_len + i + 1] = 0 |
|
data[:, i : i + 1, :] = ( |
|
prefill_data[t] |
|
.reshape((1, 1, cfg.hidden_size)) |
|
.astype(bfloat16) |
|
) |
|
|
|
if slice_index == slice_indexs[-1]: |
|
curlen_procd = token_len - slice_index * input_prefill_len |
|
else: |
|
curlen_procd = input_prefill_len |
|
|
|
mask = mask.astype(bfloat16) |
|
for i in range(cfg.num_hidden_layers): |
|
input_feed = { |
|
"K_cache": ( |
|
k_caches[i][:, 0: current_slice_len * slice_index, :] |
|
if slice_index |
|
else np.zeros((1, 1, cfg.hidden_size), dtype=bfloat16) |
|
), |
|
"V_cache": ( |
|
v_caches[i][:, 0: current_slice_len * slice_index, :] |
|
if slice_index |
|
else np.zeros((1, 1, cfg.hidden_size), dtype=bfloat16) |
|
), |
|
"indices": indices, |
|
"input": data, |
|
"mask": mask, |
|
} |
|
                outputs = prefill_decoder_sessions[i].run(None, input_feed, shape_group=slice_index + 1)
|
|
|
k_caches[i][ |
|
:, |
|
slice_index |
|
* input_prefill_len : slice_index |
|
* input_prefill_len + curlen_procd, |
|
:, |
|
] = outputs[0][:, :curlen_procd, :] |
|
|
|
v_caches[i][ |
|
:, |
|
slice_index |
|
* input_prefill_len : slice_index |
|
* input_prefill_len + curlen_procd, |
|
:, |
|
] = outputs[1][:, :curlen_procd, :] |
|
|
|
data = outputs[2] |
|
|
|
print("slice prefill done", slice_index) |
|
|
|
post_out = post_process_session.run( |
|
None, |
|
{ |
|
"input": data[ |
|
:, token_len - (len(slice_indexs) - 1) * input_prefill_len - 1, None, : |
|
] |
|
} |
|
)[0] |
|
        next_token, possible_tokens, possible_soft = post_process(post_out)
        possible_decoded = [tokenizer.decode([t]) for t in possible_tokens]
        possible_pairs = [str((t, s)) for t, s in zip(possible_decoded, possible_soft)]
|
token_ids.append(next_token) |
|
|
|
print("answer >>", tokenizer.decode(token_ids[token_len], skip_special_tokens=True), end='', flush=True) |
|
|
|
|
|
|
|
kv_cache_len = lastN |
|
mask = np.zeros((1, 1, kv_cache_len + 1), dtype=np.float32).astype(bfloat16) |
|
mask[:, :, :kv_cache_len] -= 65536 |
|
if input_prefill_len > 0: |
|
mask[:, :, :token_len] = 0 |
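
    # Autoregressive decode: one token per step with shape_group=0 (single-token
    # input against the full-length KV cache). Prompt positions are skipped here
    # because they were already processed during prefill.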
|
|
|
|
|
for start_indice in range(kv_cache_len): |
|
if input_prefill_len > 0 and start_indice < token_len: |
|
continue |
|
|
|
next_token = token_ids[start_indice] |
|
indices = np.array([start_indice], np.uint32).reshape((1, 1)) |
|
data = embeds[next_token, :].reshape((1, 1, cfg.hidden_size)).astype(bfloat16) |
|
for i in range(cfg.num_hidden_layers): |
|
input_feed = { |
|
"K_cache": k_caches[i], |
|
"V_cache": v_caches[i], |
|
"indices": indices, |
|
"input": data, |
|
"mask": mask, |
|
} |
|
            outputs = prefill_decoder_sessions[i].run(None, input_feed, shape_group=0)
|
k_caches[i][:, start_indice, :] = outputs[0][:, :, :] |
|
v_caches[i][:, start_indice, :] = outputs[1][:, :, :] |
|
data = outputs[2] |
|
mask[..., start_indice] = 0 |
|
        if start_indice >= token_len - 1:
|
post_out = post_process_session.run(None, {"input": data})[0] |
|
            next_token, possible_tokens, possible_soft = post_process(post_out)
|
token_ids.append(next_token) |
|
            if next_token == tokenizer.eos_token_id and start_indice >= token_len:
|
break |
|
|
|
print(tokenizer.decode(next_token, skip_special_tokens=True), end='', flush=True) |
|
|