Tom Aarsen committed on
Commit
ce72927
·
1 Parent(s): 395ad23

Rename the model to add -seq-cls for clarity

Browse files
Files changed (1) hide show
  1. README.md +4 -4
README.md CHANGED
@@ -86,7 +86,7 @@ def format_document(document):
86
  return f"<Document>: {document}{suffix}"
87
 
88
 
89
- model = CrossEncoder("tomaarsen/Qwen3-Reranker-0.6B")
90
 
91
  task = "Given a web search query, retrieve relevant passages that answer the query"
92
 
@@ -131,10 +131,10 @@ def format_instruction(instruction, query, doc):
131
  return output
132
 
133
 
134
- tokenizer = AutoTokenizer.from_pretrained("tomaarsen/Qwen3-Reranker-0.6B", padding_side="left")
135
- model = AutoModelForSequenceClassification.from_pretrained("tomaarsen/Qwen3-Reranker-0.6B").eval()
136
  # We recommend enabling flash_attention_2 for better acceleration and memory saving.
137
- # model = AutoModelForSequenceClassification.from_pretrained("tomaarsen/Qwen3-Reranker-0.6B", torch_dtype=torch.float16, attn_implementation="flash_attention_2").cuda().eval()
138
  max_length = 8192
139
 
140
  task = "Given a web search query, retrieve relevant passages that answer the query"
 
86
  return f"<Document>: {document}{suffix}"
87
 
88
 
89
+ model = CrossEncoder("tomaarsen/Qwen3-Reranker-0.6B-seq-cls")
90
 
91
  task = "Given a web search query, retrieve relevant passages that answer the query"
92
 
 
131
  return output
132
 
133
 
134
+ tokenizer = AutoTokenizer.from_pretrained("tomaarsen/Qwen3-Reranker-0.6B-seq-cls", padding_side="left")
135
+ model = AutoModelForSequenceClassification.from_pretrained("tomaarsen/Qwen3-Reranker-0.6B-seq-cls").eval()
136
  # We recommend enabling flash_attention_2 for better acceleration and memory saving.
137
+ # model = AutoModelForSequenceClassification.from_pretrained("tomaarsen/Qwen3-Reranker-0.6B-seq-cls", torch_dtype=torch.float16, attn_implementation="flash_attention_2").cuda().eval()
138
  max_length = 8192
139
 
140
  task = "Given a web search query, retrieve relevant passages that answer the query"