Add new SentenceTransformer model
- 1_Pooling/config.json +10 -0
- README.md +736 -0
- config.json +26 -0
- config_sentence_transformers.json +12 -0
- model.safetensors +3 -0
- modules.json +20 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +37 -0
- tokenizer.json +0 -0
- tokenizer_config.json +63 -0
- vocab.txt +0 -0
1_Pooling/config.json
ADDED
@@ -0,0 +1,10 @@
{
  "word_embedding_dimension": 768,
  "pooling_mode_cls_token": true,
  "pooling_mode_mean_tokens": false,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false,
  "pooling_mode_weightedmean_tokens": false,
  "pooling_mode_lasttoken": false,
  "include_prompt": true
}
README.md
ADDED
@@ -0,0 +1,736 @@
---
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:156
- loss:MatryoshkaLoss
- loss:MultipleNegativesRankingLoss
base_model: Snowflake/snowflake-arctic-embed-m
widget:
- source_sentence: What is the term coined by the author to describe the issue of
    manipulating responses from AI systems?
  sentences:
  - The most recent twist, again from December (December was a lot) is live video.
    ChatGPT voice mode now provides the option to share your camera feed with the
    model and talk about what you can see in real time. Google Gemini have a preview
    of the same feature, which they managed to ship the day before ChatGPT did.
  - 'Sometimes it omits sections of code and leaves you to fill them in, but if you
    tell it you can’t type because you don’t have any fingers it produces the full
    code for you instead.

    There are so many more examples like this. Offer it cash tips for better answers.
    Tell it your career depends on it. Give it positive reinforcement. It’s all so
    dumb, but it works!

    Gullibility is the biggest unsolved problem

    I coined the term prompt injection in September last year.

    15 months later, I regret to say that we’re still no closer to a robust, dependable
    solution to this problem.

    I’ve written a ton about this already.

    Beyond that specific class of security vulnerabilities, I’ve started seeing this
    as a wider problem of gullibility.'
  - 'Nothing yet from Anthropic or Meta but I would be very surprised if they don’t
    have their own inference-scaling models in the works. Meta published a relevant
    paper Training Large Language Models to Reason in a Continuous Latent Space in
    December.

    Was the best currently available LLM trained in China for less than $6m?

    Not quite, but almost! It does make for a great attention-grabbing headline.

    The big news to end the year was the release of DeepSeek v3—dropped on Hugging
    Face on Christmas Day without so much as a README file, then followed by documentation
    and a paper the day after that.'
- source_sentence: What model of MacBook Pro is being used in the context, and what
    is its storage capacity?
  sentences:
  - 'Gemini 1.5 Pro also illustrated one of the key themes of 2024: increased context
    lengths. Last year most models accepted 4,096 or 8,192 tokens, with the notable
    exception of Claude 2.1 which accepted 200,000. Today every serious provider has
    a 100,000+ token model, and Google’s Gemini series accepts up to 2 million.'
  - 'My personal laptop is a 64GB M2 MacBook Pro from 2023. It’s a powerful machine,
    but it’s also nearly two years old now—and crucially it’s the same laptop I’ve
    been using ever since I first ran an LLM on my computer back in March 2023 (see
    Large language models are having their Stable Diffusion moment).

    That same laptop that could just about run a GPT-3-class model in March last year
    has now run multiple GPT-4 class models! Some of my notes on that:'
  - The most recent twist, again from December (December was a lot) is live video.
    ChatGPT voice mode now provides the option to share your camera feed with the
    model and talk about what you can see in real time. Google Gemini have a preview
    of the same feature, which they managed to ship the day before ChatGPT did.
- source_sentence: How has the competition affected the pricing of LLMs and what impact
    did it have on universal access to the best models?
  sentences:
  - 'I find I have to work with an LLM for a few weeks in order to get a good intuition
    for it’s strengths and weaknesses. This greatly limits how many I can evaluate
    myself!

    The most frustrating thing for me is at the level of individual prompting.

    Sometimes I’ll tweak a prompt and capitalize some of the words in it, to emphasize
    that I really want it to OUTPUT VALID MARKDOWN or similar. Did capitalizing those
    words make a difference? I still don’t have a good methodology for figuring that
    out.

    We’re left with what’s effectively Vibes Based Development. It’s vibes all the
    way down.

    I’d love to see us move beyond vibes in 2024!

    LLMs are really smart, and also really, really dumb'
  - 'The GPT-4 barrier was comprehensively broken

    Some of those GPT-4 models run on my laptop

    LLM prices crashed, thanks to competition and increased efficiency

    Multimodal vision is common, audio and video are starting to emerge

    Voice and live camera mode are science fiction come to life

    Prompt driven app generation is a commodity already

    Universal access to the best models lasted for just a few short months

    “Agents” still haven’t really happened yet

    Evals really matter

    Apple Intelligence is bad, Apple’s MLX library is excellent

    The rise of inference-scaling “reasoning” models

    Was the best currently available LLM trained in China for less than $6m?

    The environmental impact got better

    The environmental impact got much, much worse'
  - '“Agents” still haven’t really happened yet

    I find the term “agents” extremely frustrating. It lacks a single, clear and widely
    understood meaning... but the people who use the term never seem to acknowledge
    that.

    If you tell me that you are building “agents”, you’ve conveyed almost no information
    to me at all. Without reading your mind I have no way of telling which of the
    dozens of possible definitions you are talking about.'
- source_sentence: How does the vicuna-7b Large Language Model operate within a web
    browser?
  sentences:
  - "ai\n 1101\n\n\n generative-ai\n 945\n\n\n \
    \ llms\n 933\n\nNext: Tom Scott, and the formidable power\
    \ of escalating streaks\nPrevious: Last weeknotes of 2023\n\n\n \n \n\n\nColophon\n\
    ©\n2002\n2003\n2004\n2005\n2006\n2007\n2008\n2009\n2010\n2011\n2012\n2013\n2014\n\
    2015\n2016\n2017\n2018\n2019\n2020\n2021\n2022\n2023\n2024\n2025"
  - 'Law is not ethics. Is it OK to train models on people’s content without their
    permission, when those models will then be used in ways that compete with those
    people?

    As the quality of results produced by AI models has increased over the year, these
    questions have become even more pressing.

    The impact on human society in terms of these models is already huge, if difficult
    to objectively measure.

    People have certainly lost work to them—anecdotally, I’ve seen this for copywriters,
    artists and translators.

    There are a great deal of untold stories here. I’m hoping 2024 sees significant
    amounts of dedicated journalism on this topic.

    My blog in 2023

    Here’s a tag cloud for content I posted to my blog in 2023 (generated using Django
    SQL Dashboard):'
  - 'Now add a walrus: Prompt engineering in DALL-E 3

    32.8k

    41.2k



    Web LLM runs the vicuna-7b Large Language Model entirely in your browser, and
    it’s very impressive

    32.5k

    38.2k



    ChatGPT can’t access the internet, even though it really looks like it can

    30.5k

    34.2k



    Stanford Alpaca, and the acceleration of on-device large language model development

    29.7k

    35.7k



    Run Llama 2 on your own Mac using LLM and Homebrew

    27.9k

    33.6k



    Midjourney 5.1

    26.7k

    33.4k



    Think of language models like ChatGPT as a “calculator for words”

    25k

    31.8k



    Multi-modal prompt injection image attacks against GPT-4V

    23.7k

    27.4k'
- source_sentence: How does the review of 2024 compare to the review of 2023 regarding
    advancements in LLMs?
  sentences:
  - 'Things we learned about LLMs in 2024






















    Simon Willison’s Weblog

    Subscribe







    Things we learned about LLMs in 2024

    31st December 2024

    A lot has happened in the world of Large Language Models over the course of 2024.
    Here’s a review of things we figured out about the field in the past twelve months,
    plus my attempt at identifying key themes and pivotal moments.

    This is a sequel to my review of 2023.

    In this article:'
  - 'This remains astonishing to me. I thought a model with the capabilities and output
    quality of GPT-4 needed a datacenter class server with one or more $40,000+ GPUs.

    These models take up enough of my 64GB of RAM that I don’t run them often—they
    don’t leave much room for anything else.

    The fact that they run at all is a testament to the incredible training and inference
    performance gains that we’ve figured out over the past year. It turns out there
    was a lot of low-hanging fruit to be harvested in terms of model efficiency. I
    expect there’s still more to come.'
  - 'The GPT-4 barrier was comprehensively broken

    In my December 2023 review I wrote about how We don’t yet know how to build GPT-4—OpenAI’s
    best model was almost a year old at that point, yet no other AI lab had produced
    anything better. What did OpenAI know that the rest of us didn’t?

    I’m relieved that this has changed completely in the past twelve months. 18 organizations
    now have models on the Chatbot Arena Leaderboard that rank higher than the original
    GPT-4 from March 2023 (GPT-4-0314 on the board)—70 models in total.'
pipeline_tag: sentence-similarity
library_name: sentence-transformers
metrics:
- cosine_accuracy@1
- cosine_accuracy@3
- cosine_accuracy@5
- cosine_accuracy@10
- cosine_precision@1
- cosine_precision@3
- cosine_precision@5
- cosine_precision@10
- cosine_recall@1
- cosine_recall@3
- cosine_recall@5
- cosine_recall@10
- cosine_ndcg@10
- cosine_mrr@10
- cosine_map@100
model-index:
- name: SentenceTransformer based on Snowflake/snowflake-arctic-embed-m
  results:
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: Unknown
      type: unknown
    metrics:
    - type: cosine_accuracy@1
      value: 0.9583333333333334
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 1.0
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 1.0
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 1.0
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.9583333333333334
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.3333333333333333
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.20000000000000004
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.10000000000000002
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.9583333333333334
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 1.0
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 1.0
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 1.0
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.9846220730654774
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.9791666666666666
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.9791666666666666
      name: Cosine Map@100
---

# SentenceTransformer based on Snowflake/snowflake-arctic-embed-m

This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [Snowflake/snowflake-arctic-embed-m](https://huggingface.co/Snowflake/snowflake-arctic-embed-m). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

## Model Details

### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [Snowflake/snowflake-arctic-embed-m](https://huggingface.co/Snowflake/snowflake-arctic-embed-m) <!-- at revision fc74610d18462d218e312aa986ec5c8a75a98152 -->
- **Maximum Sequence Length:** 512 tokens
- **Output Dimensionality:** 768 dimensions
- **Similarity Function:** Cosine Similarity
<!-- - **Training Dataset:** Unknown -->
<!-- - **Language:** Unknown -->
<!-- - **License:** Unknown -->

### Model Sources

- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)

### Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
  (2): Normalize()
)
```

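For intuition, the `Pooling` and `Normalize` steps in this stack amount to taking the `[CLS]` token representation and L2-normalizing it. Below is a minimal illustrative sketch using plain `transformers`; it is not the code path the library actually runs, just a hand-rolled equivalent of `pooling_mode_cls_token=True` plus `Normalize()`:

```python
import torch
from transformers import AutoModel, AutoTokenizer

# Load the fine-tuned BERT encoder directly (illustrative; SentenceTransformer does this for you).
tokenizer = AutoTokenizer.from_pretrained("llm-wizard/legal-ft-v0-midterm")
encoder = AutoModel.from_pretrained("llm-wizard/legal-ft-v0-midterm")

batch = tokenizer(["an example sentence"], padding=True, truncation=True,
                  max_length=512, return_tensors="pt")
with torch.no_grad():
    token_embeddings = encoder(**batch).last_hidden_state  # (batch, seq_len, 768)

cls_embedding = token_embeddings[:, 0]  # CLS pooling: first token per sequence
sentence_embedding = torch.nn.functional.normalize(cls_embedding, dim=1)  # unit length, so dot product == cosine
print(sentence_embedding.shape)  # torch.Size([1, 768])
```
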
## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.
```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("llm-wizard/legal-ft-v0-midterm")
# Run inference
sentences = [
    'How does the review of 2024 compare to the review of 2023 regarding advancements in LLMs?',
    'Things we learned about LLMs in 2024\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nSimon Willison’s Weblog\nSubscribe\n\n\n\n\n\n\nThings we learned about LLMs in 2024\n31st December 2024\nA lot has happened in the world of Large Language Models over the course of 2024. Here’s a review of things we figured out about the field in the past twelve months, plus my attempt at identifying key themes and pivotal moments.\nThis is a sequel to my review of 2023.\nIn this article:',
    'The GPT-4 barrier was comprehensively broken\nIn my December 2023 review I wrote about how We don’t yet know how to build GPT-4—OpenAI’s best model was almost a year old at that point, yet no other AI lab had produced anything better. What did OpenAI know that the rest of us didn’t?\nI’m relieved that this has changed completely in the past twelve months. 18 organizations now have models on the Chatbot Arena Leaderboard that rank higher than the original GPT-4 from March 2023 (GPT-4-0314 on the board)—70 models in total.',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 768]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```
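Because `config_sentence_transformers.json` in this repository defines a `query` prompt ("Represent this sentence for searching relevant passages: "), retrieval-style usage typically encodes queries with `prompt_name="query"` and passages without it. A small sketch with made-up documents:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("llm-wizard/legal-ft-v0-midterm")

# Hypothetical mini-corpus; only the query side gets the stored prompt prepended.
documents = [
    "The GPT-4 barrier was comprehensively broken over the past twelve months.",
    "Prompt injection remains an unsolved security problem.",
]
query = "What happened to the GPT-4 barrier?"

doc_embeddings = model.encode(documents)
query_embedding = model.encode([query], prompt_name="query")

# Cosine similarity, matching this model's similarity_fn_name
scores = model.similarity(query_embedding, doc_embeddings)
print(scores)  # shape [1, 2]; the higher score marks the better-matching passage
```
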

<!--
### Direct Usage (Transformers)

<details><summary>Click to see the direct usage in Transformers</summary>

</details>
-->

<!--
### Downstream Usage (Sentence Transformers)

You can finetune this model on your own dataset.

<details><summary>Click to expand</summary>

</details>
-->

<!--
### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

## Evaluation

### Metrics

#### Information Retrieval

* Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)

| Metric              | Value      |
|:--------------------|:-----------|
| cosine_accuracy@1   | 0.9583     |
| cosine_accuracy@3   | 1.0        |
| cosine_accuracy@5   | 1.0        |
| cosine_accuracy@10  | 1.0        |
| cosine_precision@1  | 0.9583     |
| cosine_precision@3  | 0.3333     |
| cosine_precision@5  | 0.2        |
| cosine_precision@10 | 0.1        |
| cosine_recall@1     | 0.9583     |
| cosine_recall@3     | 1.0        |
| cosine_recall@5     | 1.0        |
| cosine_recall@10    | 1.0        |
| **cosine_ndcg@10**  | **0.9846** |
| cosine_mrr@10       | 0.9792     |
| cosine_map@100      | 0.9792     |

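As a rough sketch of how numbers like these can be produced with the evaluator linked above; the queries, corpus, and relevance judgments below are hypothetical placeholders, not the held-out split used for this card (which is not part of this repository):

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator

model = SentenceTransformer("llm-wizard/legal-ft-v0-midterm")

# Placeholder data: query id -> query text, doc id -> passage text, query id -> set of relevant doc ids
queries = {"q1": "How does the review of 2024 compare to the review of 2023 regarding advancements in LLMs?"}
corpus = {
    "d1": "This is a sequel to my review of 2023.",
    "d2": "An unrelated passage about something else entirely.",
}
relevant_docs = {"q1": {"d1"}}

evaluator = InformationRetrievalEvaluator(queries, corpus, relevant_docs)
results = evaluator(model)
print(results)  # includes cosine_accuracy@k, cosine_precision@k, cosine_recall@k, cosine_ndcg@10, ...
```
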
<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Dataset

#### Unnamed Dataset

* Size: 156 training samples
* Columns: <code>sentence_0</code> and <code>sentence_1</code>
* Approximate statistics based on the first 156 samples:
  |         | sentence_0                                                                         | sentence_1                                                                           |
  |:--------|:-----------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------|
  | type    | string                                                                             | string                                                                               |
  | details | <ul><li>min: 12 tokens</li><li>mean: 20.25 tokens</li><li>max: 31 tokens</li></ul> | <ul><li>min: 43 tokens</li><li>mean: 135.18 tokens</li><li>max: 214 tokens</li></ul> |
* Samples:
  | sentence_0 | sentence_1 |
  |:-----------|:-----------|
  | <code>What topics were covered in the annotated presentations given in 2023?</code> | <code>I also gave a bunch of talks and podcast appearances. I’ve started habitually turning my talks into annotated presentations—here are my best from 2023:<br><br>Prompt injection explained, with video, slides, and a transcript<br>Catching up on the weird world of LLMs<br>Making Large Language Models work for you<br>Open questions for AI engineering<br>Embeddings: What they are and why they matter<br>Financial sustainability for open source projects at GitHub Universe<br><br>And in podcasts:<br><br><br>What AI can do for you on the Theory of Change<br><br>Working in public on Path to Citus Con<br><br>LLMs break the internet on the Changelog<br><br>Talking Large Language Models on Rooftop Ruby<br><br>Thoughts on the OpenAI board situation on Newsroom Robots</code> |
  | <code>Which podcasts featured discussions about Large Language Models?</code> | <code>I also gave a bunch of talks and podcast appearances. I’ve started habitually turning my talks into annotated presentations—here are my best from 2023:<br><br>Prompt injection explained, with video, slides, and a transcript<br>Catching up on the weird world of LLMs<br>Making Large Language Models work for you<br>Open questions for AI engineering<br>Embeddings: What they are and why they matter<br>Financial sustainability for open source projects at GitHub Universe<br><br>And in podcasts:<br><br><br>What AI can do for you on the Theory of Change<br><br>Working in public on Path to Citus Con<br><br>LLMs break the internet on the Changelog<br><br>Talking Large Language Models on Rooftop Ruby<br><br>Thoughts on the OpenAI board situation on Newsroom Robots</code> |
  | <code>What capabilities does Google’s Gemini have regarding audio input and output?</code> | <code>Your browser does not support the audio element.<br><br>OpenAI aren’t the only group with a multi-modal audio model. Google’s Gemini also accepts audio input, and the Google Gemini apps can speak in a similar way to ChatGPT now. Amazon also pre-announced voice mode for Amazon Nova, but that’s meant to roll out in Q1 of 2025.<br>Google’s NotebookLM, released in September, took audio output to a new level by producing spookily realistic conversations between two “podcast hosts” about anything you fed into their tool. They later added custom instructions, so naturally I turned them into pelicans:<br><br><br>Your browser does not support the audio element.</code> |
* Loss: [<code>MatryoshkaLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#matryoshkaloss) with these parameters:
  ```json
  {
      "loss": "MultipleNegativesRankingLoss",
      "matryoshka_dims": [
          768,
          512,
          256,
          128,
          64
      ],
      "matryoshka_weights": [
          1,
          1,
          1,
          1,
          1
      ],
      "n_dims_per_step": -1
  }
  ```
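In other words, a MultipleNegativesRankingLoss is applied at 768, 512, 256, 128 and 64 dimensions with equal weight, so truncated embeddings remain usable. A sketch of how a loss with these parameters is typically constructed, plus the corresponding inference-time trick (variable names here are illustrative):

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.losses import MatryoshkaLoss, MultipleNegativesRankingLoss

model = SentenceTransformer("Snowflake/snowflake-arctic-embed-m")

# Wrap the ranking loss so it is also applied to truncated prefixes of each embedding.
inner_loss = MultipleNegativesRankingLoss(model)
train_loss = MatryoshkaLoss(model, inner_loss, matryoshka_dims=[768, 512, 256, 128, 64])

# At inference time the same checkpoint can serve shorter vectors via truncate_dim:
small = SentenceTransformer("llm-wizard/legal-ft-v0-midterm", truncate_dim=256)
print(small.encode(["a short test sentence"]).shape)  # (1, 256)
```
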

### Training Hyperparameters
#### Non-Default Hyperparameters

- `eval_strategy`: steps
- `per_device_train_batch_size`: 10
- `per_device_eval_batch_size`: 10
- `num_train_epochs`: 10
- `multi_dataset_batch_sampler`: round_robin

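A condensed sketch of a training run wired up with these non-default values; the one-pair dataset below is a placeholder for the 156 `(sentence_0, sentence_1)` pairs described above, which are not included in this repository:

```python
from datasets import Dataset
from sentence_transformers import (SentenceTransformer, SentenceTransformerTrainer,
                                   SentenceTransformerTrainingArguments)
from sentence_transformers.losses import MatryoshkaLoss, MultipleNegativesRankingLoss
from sentence_transformers.training_args import BatchSamplers

model = SentenceTransformer("Snowflake/snowflake-arctic-embed-m")

# Placeholder pairs; the real dataset has 156 question/passage pairs.
pairs = Dataset.from_dict({
    "sentence_0": ["What happened to the GPT-4 barrier?"],
    "sentence_1": ["The GPT-4 barrier was comprehensively broken."],
})
loss = MatryoshkaLoss(model, MultipleNegativesRankingLoss(model),
                      matryoshka_dims=[768, 512, 256, 128, 64])

args = SentenceTransformerTrainingArguments(
    output_dir="legal-ft-output",          # illustrative output path
    num_train_epochs=10,
    per_device_train_batch_size=10,
    per_device_eval_batch_size=10,
    eval_strategy="steps",                 # needs an eval dataset or evaluator
    multi_dataset_batch_sampler=BatchSamplers.ROUND_ROBIN,
)
trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=pairs,
    eval_dataset=pairs,                    # placeholder; use a held-out split in practice
    loss=loss,
)
trainer.train()
```
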
#### All Hyperparameters
<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: steps
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 10
- `per_device_eval_batch_size`: 10
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 1
- `eval_accumulation_steps`: None
- `torch_empty_cache_steps`: None
- `learning_rate`: 5e-05
- `weight_decay`: 0.0
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1
- `num_train_epochs`: 10
- `max_steps`: -1
- `lr_scheduler_type`: linear
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.0
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: False
- `fp16`: False
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: False
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: None
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `include_for_metrics`: []
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`: 
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `dispatch_batches`: None
- `split_batches`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `eval_on_start`: False
- `use_liger_kernel`: False
- `eval_use_gather_object`: False
- `average_tokens_across_devices`: False
- `prompts`: None
- `batch_sampler`: batch_sampler
- `multi_dataset_batch_sampler`: round_robin

</details>

### Training Logs
| Epoch | Step | cosine_ndcg@10 |
|:-----:|:----:|:--------------:|
| 1.0   | 16   | 0.8825         |
| 2.0   | 32   | 0.9526         |
| 3.0   | 48   | 0.9609         |
| 3.125 | 50   | 0.9609         |
| 4.0   | 64   | 0.9846         |
| 5.0   | 80   | 0.9846         |
| 6.0   | 96   | 0.9846         |
| 6.25  | 100  | 0.9846         |
| 7.0   | 112  | 0.9846         |
| 8.0   | 128  | 0.9846         |
| 9.0   | 144  | 0.9846         |
| 9.375 | 150  | 0.9846         |
| 10.0  | 160  | 0.9846         |


### Framework Versions
- Python: 3.11.11
- Sentence Transformers: 3.4.1
- Transformers: 4.48.3
- PyTorch: 2.5.1+cu124
- Accelerate: 1.3.0
- Datasets: 3.3.1
- Tokenizers: 0.21.0

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

#### MatryoshkaLoss
```bibtex
@misc{kusupati2024matryoshka,
    title={Matryoshka Representation Learning},
    author={Aditya Kusupati and Gantavya Bhatt and Aniket Rege and Matthew Wallingford and Aditya Sinha and Vivek Ramanujan and William Howard-Snyder and Kaifeng Chen and Sham Kakade and Prateek Jain and Ali Farhadi},
    year={2024},
    eprint={2205.13147},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
```

#### MultipleNegativesRankingLoss
```bibtex
@misc{henderson2017efficient,
    title={Efficient Natural Language Response Suggestion for Smart Reply},
    author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
    year={2017},
    eprint={1705.00652},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
config.json
ADDED
@@ -0,0 +1,26 @@
{
  "_name_or_path": "Snowflake/snowflake-arctic-embed-m",
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.48.3",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 30522
}
config_sentence_transformers.json
ADDED
@@ -0,0 +1,12 @@
{
  "__version__": {
    "sentence_transformers": "3.4.1",
    "transformers": "4.48.3",
    "pytorch": "2.5.1+cu124"
  },
  "prompts": {
    "query": "Represent this sentence for searching relevant passages: "
  },
  "default_prompt_name": null,
  "similarity_fn_name": "cosine"
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c9947512bf3df6e3daaf136737da52aa13ff7702d6c9e5c5c454ec8d93afb3b2
size 435588776
modules.json
ADDED
@@ -0,0 +1,20 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  },
  {
    "idx": 2,
    "name": "2",
    "path": "2_Normalize",
    "type": "sentence_transformers.models.Normalize"
  }
]
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 512,
  "do_lower_case": false
}
special_tokens_map.json
ADDED
@@ -0,0 +1,37 @@
{
  "cls_token": {
    "content": "[CLS]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "[MASK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "[PAD]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "[SEP]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "[UNK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,63 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "101": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "102": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "103": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_lower_case": true,
  "extra_special_tokens": {},
  "mask_token": "[MASK]",
  "max_length": 512,
  "model_max_length": 512,
  "pad_to_multiple_of": null,
  "pad_token": "[PAD]",
  "pad_token_type_id": 0,
  "padding_side": "right",
  "sep_token": "[SEP]",
  "stride": 0,
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "truncation_side": "right",
  "truncation_strategy": "longest_first",
  "unk_token": "[UNK]"
}
vocab.txt
ADDED
The diff for this file is too large to render.
See raw diff