import subprocess

def uninstall_and_reinstall_bitsandbytes():
    try:
        # Uninstall bitsandbytes
        subprocess.run(["pip", "uninstall", "-y", "bitsandbytes"], check=True)
        print("Uninstalled bitsandbytes successfully.")

        # Reinstall bitsandbytes from the specified source
        subprocess.run(["pip", "install", "-i", "https://pypi.org/simple/", "bitsandbytes"], check=True)
        print("Reinstalled bitsandbytes successfully.")
    except subprocess.CalledProcessError as e:
        print(f"Error during uninstallation or reinstallation: {e}")

# Call the function to perform the uninstallation and reinstallation
uninstall_and_reinstall_bitsandbytes()
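# A more defensive variant (sketch, not part of the original Space): invoking
# pip through the running interpreter guarantees the package lands in the
# environment this script runs in, rather than whichever `pip` is on PATH.
# import sys
# subprocess.run([sys.executable, "-m", "pip", "install", "--force-reinstall",
#                 "bitsandbytes"], check=True)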
import os

HF_TOKEN = os.environ["HF_TOKEN"]
# os.environ["BITSANDBYTES_NOWELCOME"] = "1"

import re
import spaces
import gradio as gr
import torch

print(f"Is CUDA available: {torch.cuda.is_available()}")
# True
print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
# Tesla T4

from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from huggingface_hub import login, HfFolder
# Earlier loading attempts, kept for reference:
# tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_gemma_merged16bit_clean_final", trust_remote_code=True)
# quantization_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_use_double_quant=True,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_compute_dtype=torch.float16)
# model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_gemma_merged16bit_clean_final",
#                                              device_map="auto",
#                                              quantization_config=quantization_config,
#                                              torch_dtype=torch.float16,
#                                              low_cpu_mem_usage=True,
#                                              trust_remote_code=True)

# from transformers import AutoModelForCausalLM, AutoTokenizer
# from peft import PeftModel
# # 1. Load the base model and LoRA adapter
# model_name_or_path = "FlawedLLM/Bhashini_gemma_merged4bit_clean_final"  # Hugging Face model or local path
# lora_weights = "FlawedLLM/Bhashini_gemma_lora_clean_final"              # LoRA weights
# tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
# model = AutoModelForCausalLM.from_pretrained(model_name_or_path, load_in_8bit=True, device_map='auto')
# model = PeftModel.from_pretrained(model, lora_weights)

# Load model directly
# from transformers import AutoTokenizer, AutoModelForCausalLM
# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     llm_int8_threshold=6.0,
# )
tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_gemma_merged4bit_clean_final")
model = AutoModelForCausalLM.from_pretrained(
    "FlawedLLM/Bhashini_gemma_merged4bit_clean_final",
    device_map="auto",
    # quantization_config=quantization_config,
)
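# Optional sanity checks (sketch): confirm device placement and approximate
# memory footprint before serving requests. Both attributes are available on
# transformers models loaded with a device_map.
# print(model.hf_device_map)
# print(f"Model memory footprint: {model.get_memory_footprint() / 1e9:.2f} GB")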
# The alpaca_prompt template below must match the one used during fine-tuning.
# `spaces.GPU` requests a GPU for the duration of the call on ZeroGPU hardware;
# on a dedicated GPU Space it is effectively a no-op.
@spaces.GPU
def chunk_it(input_command, item_list):
    alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:
{}"""
    if item_list:
        item_list = f'''The ItemName should be chosen from the given list: {item_list}, except when adding an item. If ItemName does not find anything SIMILAR in the list, then the ItemName should be "Null".'''
    else:
        # Avoid interpolating the string "None" into the prompt when no list is supplied.
        item_list = ""
    inputs = tokenizer(
        [
            alpaca_prompt.format(
                f'''
You will receive text input that you need to analyze to perform the following tasks:

transaction: Record the details of an item transaction.
last n days transactions: Retrieve transaction records for a specified time period.
view risk inventory: View inventory items based on a risk category.
view inventory: View inventory details.
new items: Add new items to the inventory.
old items: View old items in inventory.
report generation: Generate various inventory reports.

Required Parameters:
Each task requires specific parameters to execute correctly:

transaction:
    ItemName (string)
    ItemQt (quantity - integer)
    Type (string: "sale" or "purchase" or "return")
    ShelfNo (string or integer)
    ReorderPoint (integer)
last n days transactions:
    ItemName (string)
    Duration (integer: number of days)
view risk inventory:
    RiskType (string: "overstock", "understock", or Null for all risk types)
view inventory:
    ItemName (string)
    ShelfNo (string or integer)
new items:
    ItemName (string)
    SellingPrice (number)
    CostPrice (number)
old items:
    ShelfNo (string or integer)
report generation:
    ItemName (string)
    Duration (integer: number of days)
    ReportType (string: "profit", "revenue", "inventory", or Null for all reports)

{item_list}

ALWAYS provide output in a JSON format.''',  # instruction
                input_command,  # input
                "",  # output - leave this blank for generation!
            )
        ], return_tensors="pt").to("cuda")
    outputs = model.generate(**inputs, max_new_tokens=216, use_cache=True)
    reply = tokenizer.batch_decode(outputs)

    # Regular expression pattern to match content between "### Response:" and the "<eos>" token
    pattern = r"### Response:\n(.*?)<eos>"
    # Search for the pattern in the decoded text
    match = re.search(pattern, reply[0], re.DOTALL)  # re.DOTALL allows '.' to match newlines
    if match is None:
        # Fall back to the raw decode if the model never emitted an <eos> marker.
        return reply[0]
    reply = match.group(1).strip()  # Extract and remove extra whitespace
    return reply
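# Example call (hypothetical values; the exact JSON keys depend on the
# fine-tune, so treat the expected output below as an illustration only):
# chunk_it("sold 5 staplers from shelf A2", "stapler, pen, notebook")
# -> '{"task": "transaction", "ItemName": "stapler", "ItemQt": 5,
#     "Type": "sale", "ShelfNo": "A2", "ReorderPoint": null}'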
iface = gr.Interface(
    fn=chunk_it,
    inputs=[
        gr.Textbox(label="Input Command", lines=3),
        gr.Textbox(label="Item List", lines=5),
    ],
    outputs="text",
    title="Formatter Pro",
)
iface.launch(inline=False)