Files changed (1)
  1. README.md +2 -1
README.md CHANGED
@@ -6,6 +6,7 @@ tags:
 - llama-2
 - mlx
 pipeline_tag: text-generation
+new_version: mlx-community/CodeLlama-7b-Instruct-hf-4bit-mlx-2
 ---
 ![Alt text](https://media.discordapp.net/attachments/989904887330521099/1201717650128896070/Llama_Coding_on_MacBook_1.png?ex=65cad5c6&is=65b860c6&hm=8008a5817272fa49fca67143516563b2578accf263cc04d6768e689c1be2f483&=&format=webp&quality=lossless&width=1372&height=1372)
 # mlx-community/CodeLlama-7b-Instruct-hf-4bit-MLX
@@ -22,4 +23,4 @@ from mlx_lm import load, generate
 
 model, tokenizer = load("mlx-community/CodeLlama-7b-Instruct-hf-4bit-MLX")
 response = generate(model, tokenizer, prompt="hello", verbose=True)
-```
+```
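
For reference, the quickstart the second hunk touches can be reassembled from the hunk header (`from mlx_lm import load, generate`) and the context lines above into a runnable snippet. This is a minimal sketch assuming `mlx-lm` is installed (`pip install mlx-lm`) and that `generate` still accepts the `prompt=` keyword shown in the diff:

```python
# Quickstart from the README, reassembled from the diff's context lines.
from mlx_lm import load, generate

# Download and load the 4-bit quantized CodeLlama model plus its tokenizer.
model, tokenizer = load("mlx-community/CodeLlama-7b-Instruct-hf-4bit-MLX")

# verbose=True streams the generated tokens and prints generation stats.
response = generate(model, tokenizer, prompt="hello", verbose=True)
print(response)
```

The added `new_version` front-matter key is Hub model-card metadata marking this repo as superseded by mlx-community/CodeLlama-7b-Instruct-hf-4bit-mlx-2; the same snippet should load the newer model by swapping in that repo id.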