nielsr HF Staff commited on
Commit
09ae5dd
·
verified ·
1 Parent(s): c59d17c

Add pipeline tag

Browse files

This PR adds the `pipeline_tag: text-generation` to the model card's metadata. This ensures that the model can be properly categorized and discovered on the Hugging Face Hub, appearing under searches for text generation models.

Files changed (1):
  1. README.md (+8 −3)
README.md CHANGED
@@ -1,6 +1,5 @@
1
  ---
2
  base_model: google/gemma-2-9b
3
- license: cc-by-nc-sa-4.0
4
  language:
5
  - de
6
  - nl
@@ -25,6 +24,8 @@ language:
25
  - ro
26
  - fi
27
  library_name: transformers
 
 
28
  ---
29
 
30
  ![Tower Plus Pareto](./Tower-plus-pareto.png)
@@ -71,7 +72,9 @@ sampling_params = SamplingParams(
71
  max_tokens=8192,
72
  )
73
  llm = LLM(model="Unbabel/Tower-Plus-9B", tensor_parallel_size=1)
74
- messages = [{"role": "user", "content": "Translate the following English source text to Portuguese (Portugal):\nEnglish: Hello world!\nPortuguese (Portugal): "}]
 
 
75
  outputs = llm.chat(messages, sampling_params)
76
  # Make sure your prompt_token_ids look like this
77
  print (outputs[0].outputs[0].text)
@@ -89,7 +92,9 @@ from transformers import pipeline
89
 
90
  pipe = pipeline("text-generation", model="Unbabel/Tower-Plus-9B", device_map="auto")
91
  # We use the tokenizer’s chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
92
- messages = [{"role": "user", "content": "Translate the following English source text to Portuguese (Portugal):\nEnglish: Hello world!\nPortuguese (Portugal): "}]
 
 
93
  input_ids = pipe.tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True)
94
  outputs = pipe(messages, max_new_tokens=256, do_sample=False)
95
  print(outputs[0]["generated_text"])
 
1
  ---
2
  base_model: google/gemma-2-9b
 
3
  language:
4
  - de
5
  - nl
 
24
  - ro
25
  - fi
26
  library_name: transformers
27
+ license: cc-by-nc-sa-4.0
28
+ pipeline_tag: text-generation
29
  ---
30
 
31
  ![Tower Plus Pareto](./Tower-plus-pareto.png)
 
72
  max_tokens=8192,
73
  )
74
  llm = LLM(model="Unbabel/Tower-Plus-9B", tensor_parallel_size=1)
75
+ messages = [{"role": "user", "content": "Translate the following English source text to Portuguese (Portugal):
76
+ English: Hello world!
77
+ Portuguese (Portugal): "}]
78
  outputs = llm.chat(messages, sampling_params)
79
  # Make sure your prompt_token_ids look like this
80
  print (outputs[0].outputs[0].text)
 
92
 
93
  pipe = pipeline("text-generation", model="Unbabel/Tower-Plus-9B", device_map="auto")
94
  # We use the tokenizer’s chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
95
+ messages = [{"role": "user", "content": "Translate the following English source text to Portuguese (Portugal):
96
+ English: Hello world!
97
+ Portuguese (Portugal): "}]
98
  input_ids = pipe.tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True)
99
  outputs = pipe(messages, max_new_tokens=256, do_sample=False)
100
  print(outputs[0]["generated_text"])