JordanTallon committed
Commit 5b3ddba • 1 Parent(s): 03443aa
Push model using huggingface_hub.
Browse files
- 1_Pooling/config.json +7 -0
- README.md +233 -167
- config.json +18 -22
- config_sentence_transformers.json +7 -0
- config_setfit.json +8 -0
- model.safetensors +2 -2
- model_head.pkl +3 -0
- modules.json +20 -0
- sentence_bert_config.json +4 -0
- tokenizer.json +2 -4
- tokenizer_config.json +3 -1
1_Pooling/config.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "word_embedding_dimension": 384,
+  "pooling_mode_cls_token": true,
+  "pooling_mode_mean_tokens": false,
+  "pooling_mode_max_tokens": false,
+  "pooling_mode_mean_sqrt_len_tokens": false
+}
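This pooling config selects CLS-token pooling: the sentence embedding is the hidden state of the first token, not a mean or max over all tokens. A minimal sketch of what that computes, with illustrative tensor shapes:

```python
import torch

# Illustrative output of the transformer body: [batch, seq_len, word_embedding_dimension]
token_embeddings = torch.randn(2, 16, 384)

# CLS pooling: keep only the first ([CLS]) token's hidden state per sequence
sentence_embeddings = token_embeddings[:, 0]  # shape: [batch, 384]
```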
README.md
CHANGED
@@ -1,201 +1,267 @@
----
-library_name:
-tags:
----
-
-#
-
-## Model Details
-
-### Model Description
-
-This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.
-
-- **Developed by:** [More Information Needed]
-- **Funded by [optional]:** [More Information Needed]
-- **Shared by [optional]:** [More Information Needed]
-- **Model type:** [More Information Needed]
-- **Language(s) (NLP):** [More Information Needed]
-- **License:** [More Information Needed]
-- **Finetuned from model [optional]:** [More Information Needed]
-
-### Model Sources [optional]
-
-<!-- Provide the basic links for the model. -->
-
-## Uses
-
-<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
-
-### Recommendations
-
-Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
-
-## How to Get Started with the Model
-
-Use the code below to get started with the model.
-
-[More Information Needed]
-
-## Training Details
-
-### Training
-
-## Model Card Contact
+---
+library_name: setfit
+tags:
+- setfit
+- sentence-transformers
+- text-classification
+- generated_from_setfit_trainer
+metrics:
+- accuracy
+widget:
+- text: The ban, which went into effect in March 2019, was embraced by Trump following
+    a massacre that killed 58 people at a music festival in Las Vegas in which the
+    gunman used bump stocks.
+- text: 'Now Modi has made international headlines for yet another similarity: He’s
+    constructing a massive wall … but unlike Trump’s goal of keeping immigrants out,
+    Modi’s wall was built to hide the country’s poverty from the gold-plated American
+    president.'
+- text: 'Though banks have fled many low-income communities, there’s a post office
+    for almost every ZIP code in the country. '
+- text: The administration has stonewalled Congress during the impeachment proceedings
+    and other investigations, but the American public overwhelmingly wants the Trump
+    administration to comply with lawmakers.
+- text: The gun lobby has repeatedly claimed that using a gun in self-defense is a
+    common event, often going so far as to allege that Americans defend themselves
+    with guns millions of times a year.
+pipeline_tag: text-classification
+inference: true
+base_model: BAAI/bge-small-en-v1.5
+model-index:
+- name: SetFit with BAAI/bge-small-en-v1.5
+  results:
+  - task:
+      type: text-classification
+      name: Text Classification
+    dataset:
+      name: Unknown
+      type: unknown
+    split: test
+    metrics:
+    - type: accuracy
+      value: 0.67003367003367
+      name: Accuracy
+---
+
+# SetFit with BAAI/bge-small-en-v1.5
+
+This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification.
+
+The model has been trained using an efficient few-shot learning technique that involves:
+
+1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.
+2. Training a classification head with features from the fine-tuned Sentence Transformer.
+
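The card does not include a training snippet; below is a minimal sketch of that two-step recipe with the setfit `Trainer` API, assuming setfit ≥ 1.0 and an illustrative toy dataset (the real training split is not part of this commit):

```python
from datasets import Dataset
from setfit import SetFitModel, Trainer, TrainingArguments

# Illustrative stand-in for the real (unpublished) training split
train_dataset = Dataset.from_dict({
    "text": ["an example news sentence", "another example news sentence"],
    "label": ["left", "right"],
})

model = SetFitModel.from_pretrained(
    "BAAI/bge-small-en-v1.5",
    labels=["center", "left", "right"],
)
args = TrainingArguments(batch_size=64, num_epochs=2)

trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
trainer.train()  # step 1: contrastive fine-tuning; step 2: fit the LogisticRegression head
```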
+## Model Details
+
+### Model Description
+- **Model Type:** SetFit
+- **Sentence Transformer body:** [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5)
+- **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance
+- **Maximum Sequence Length:** 512 tokens
+- **Number of Classes:** 3 classes
+<!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) -->
+<!-- - **Language:** Unknown -->
+<!-- - **License:** Unknown -->
+
+### Model Sources
+
+- **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit)
+- **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055)
+- **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit)
+
+### Model Labels
+| Label  | Examples |
+|:-------|:---------|
+| center | <ul><li>'A leading economist who vouched for Democratic presidential candidate Elizabeth Warren’s healthcare reform plan told Reuters on Thursday he doubts its staggering cost can be fully covered alongside her other government programs.'</li><li>'Labour leader Jeremy Corbyn unveiled his party’s election manifesto on Thursday, setting out radical plans to transform Britain with public sector pay rises, higher taxes on companies and a sweeping nationalisation of infrastructure.'</li><li>'Instagram will start blocking any hashtags spreading misinformation about vaccines, becoming the latest internet platform to crack down on bad health information.'</li></ul> |
+| right  | <ul><li>'Sanders praises the radical Green New Deal, champions a Medicare for All plan with a $34 trillion price tag, nods to abortion as a means of population control, and defends bread lines and Fidel Castro’s Cuba. '</li><li>'Since when did even conservative publications consider that it’s the right and moral thing to do to provide covering fire for an increasingly thuggish, openly hard-left, and borderline terroristic group which is less obviously to do with ‘racism’, but which has almost everything to do with smashing Western civilisation?'</li><li>'Local health officer Dr Rosana Salvaterra appeared to co-sign the demonstration, praising activists for wearing masks and claiming they obeyed social distancing protocols — although footage of the event strongly suggests that is not strictly accurate.'</li></ul> |
+| left   | <ul><li>'Activists planning to line California roadways with anti-vaccination billboards full of misinformation are paying for them through Facebook fundraisers, despite a platform-wide crackdown on such campaigns.'</li><li>'On Monday, as Common Dreams reported, Trump threatened to deploy federal forces to Chicago, Philadelphia, Detroit, Baltimore, and Oakland to confront Black Lives Matter protesters.'</li><li>"When the nation's highest civilian honor went to a right-wing media personality, it served as an oddly appropriate capstone to Trump's broader goals."</li></ul> |
+
+## Evaluation
+
+### Metrics
+| Label   | Accuracy |
+|:--------|:---------|
+| **all** | 0.6700   |
+
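A sketch of how an accuracy figure like this can be reproduced, assuming a labeled held-out split (the texts and labels below are placeholders, since the test set is not published):

```python
from setfit import SetFitModel

model = SetFitModel.from_pretrained("JordanTallon/Unifeed")

# Placeholder held-out examples
texts = ["a held-out sentence", "another held-out sentence"]
labels = ["left", "center"]

preds = model.predict(texts)
accuracy = sum(p == y for p, y in zip(preds, labels)) / len(labels)
print(f"accuracy: {accuracy:.4f}")
```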
+## Uses
+
+### Direct Use for Inference
+
+First install the SetFit library:
+
+```bash
+pip install setfit
+```
+
+Then you can load this model and run inference.
+
+```python
+from setfit import SetFitModel
+
+# Download from the 🤗 Hub
+model = SetFitModel.from_pretrained("JordanTallon/Unifeed")
+# Run inference
+preds = model("Though banks have fled many low-income communities, there’s a post office for almost every ZIP code in the country. ")
+```
+
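Predictions come back as the string labels `center`, `left` and `right` defined in `config_setfit.json`. If class probabilities are wanted instead, setfit also exposes `predict_proba`; a small sketch continuing from the snippet above:

```python
probs = model.predict_proba(["Though banks have fled many low-income communities, there’s a post office for almost every ZIP code in the country. "])
print(dict(zip(model.labels, probs[0].tolist())))
```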
+<!--
+### Downstream Use
+
+*List how someone could finetune this model on their own dataset.*
+-->
+
+<!--
+### Out-of-Scope Use
+
+*List how the model may foreseeably be misused and address what users ought not to do with the model.*
+-->
+
+<!--
+## Bias, Risks and Limitations
+
+*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
+-->
+
+<!--
+### Recommendations
+
+*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
+-->
+
+## Training Details
+
+### Training Set Metrics
+| Training set | Min | Median  | Max |
+|:-------------|:----|:--------|:----|
+| Word count   | 1   | 33.0139 | 195 |
+
+| Label  | Training Sample Count |
+|:-------|:----------------------|
+| center | 782                   |
+| left   | 780                   |
+| right  | 813                   |
+
+### Training Hyperparameters
+- batch_size: (64, 64)
+- num_epochs: (2, 2)
+- max_steps: -1
+- sampling_strategy: oversampling
+- num_iterations: 20
+- body_learning_rate: (2e-05, 2e-05)
+- head_learning_rate: 2e-05
+- loss: CosineSimilarityLoss
+- distance_metric: cosine_distance
+- margin: 0.25
+- end_to_end: False
+- use_amp: False
+- warmup_proportion: 0.1
+- seed: 42
+- eval_max_steps: -1
+- load_best_model_at_end: False
+
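For reproducibility, a sketch of how this list maps onto `setfit.TrainingArguments`; it is reconstructed by hand from the values above, not the exact invocation used (`distance_metric` is left at its default, which already matches the listed `cosine_distance`):

```python
from sentence_transformers.losses import CosineSimilarityLoss
from setfit import TrainingArguments

args = TrainingArguments(
    batch_size=(64, 64),                # (embedding phase, classifier phase)
    num_epochs=(2, 2),
    max_steps=-1,
    sampling_strategy="oversampling",
    num_iterations=20,
    body_learning_rate=(2e-05, 2e-05),
    head_learning_rate=2e-05,
    loss=CosineSimilarityLoss,
    margin=0.25,
    end_to_end=False,
    use_amp=False,
    warmup_proportion=0.1,
    seed=42,
    eval_max_steps=-1,
    load_best_model_at_end=False,
)
```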
+### Training Results
+| Epoch | Step | Training Loss | Validation Loss |
+|:------:|:----:|:-------------:|:---------------:|
+| 0.0007 | 1 | 0.2531 | - |
+| 0.0337 | 50 | 0.253 | - |
+| 0.0673 | 100 | 0.2491 | - |
+| 0.1010 | 150 | 0.2592 | - |
+| 0.1347 | 200 | 0.2476 | - |
+| 0.1684 | 250 | 0.2282 | - |
+| 0.2020 | 300 | 0.2222 | - |
+| 0.2357 | 350 | 0.2196 | - |
+| 0.2694 | 400 | 0.2199 | - |
+| 0.3030 | 450 | 0.1821 | - |
+| 0.3367 | 500 | 0.1819 | - |
+| 0.3704 | 550 | 0.1327 | - |
+| 0.4040 | 600 | 0.1193 | - |
+| 0.4377 | 650 | 0.1652 | - |
+| 0.4714 | 700 | 0.1059 | - |
+| 0.5051 | 750 | 0.1141 | - |
+| 0.5387 | 800 | 0.1103 | - |
+| 0.5724 | 850 | 0.1138 | - |
+| 0.6061 | 900 | 0.0894 | - |
+| 0.6397 | 950 | 0.1138 | - |
+| 0.6734 | 1000 | 0.11 | - |
+| 0.7071 | 1050 | 0.1091 | - |
+| 0.7407 | 1100 | 0.0804 | - |
+| 0.7744 | 1150 | 0.1161 | - |
+| 0.8081 | 1200 | 0.0715 | - |
+| 0.8418 | 1250 | 0.1 | - |
+| 0.8754 | 1300 | 0.0687 | - |
+| 0.9091 | 1350 | 0.0488 | - |
+| 0.9428 | 1400 | 0.0354 | - |
+| 0.9764 | 1450 | 0.0244 | - |
+| 1.0101 | 1500 | 0.02 | - |
+| 1.0438 | 1550 | 0.0179 | - |
+| 1.0774 | 1600 | 0.0219 | - |
+| 1.1111 | 1650 | 0.0056 | - |
+| 1.1448 | 1700 | 0.0169 | - |
+| 1.1785 | 1750 | 0.0038 | - |
+| 1.2121 | 1800 | 0.0139 | - |
+| 1.2458 | 1850 | 0.0154 | - |
+| 1.2795 | 1900 | 0.0118 | - |
+| 1.3131 | 1950 | 0.0019 | - |
+| 1.3468 | 2000 | 0.0016 | - |
+| 1.3805 | 2050 | 0.0019 | - |
+| 1.4141 | 2100 | 0.0016 | - |
+| 1.4478 | 2150 | 0.0017 | - |
+| 1.4815 | 2200 | 0.0011 | - |
+| 1.5152 | 2250 | 0.0013 | - |
+| 1.5488 | 2300 | 0.0123 | - |
+| 1.5825 | 2350 | 0.0014 | - |
+| 1.6162 | 2400 | 0.0013 | - |
+| 1.6498 | 2450 | 0.001 | - |
+| 1.6835 | 2500 | 0.0042 | - |
+| 1.7172 | 2550 | 0.0017 | - |
+| 1.7508 | 2600 | 0.0027 | - |
+| 1.7845 | 2650 | 0.0016 | - |
+| 1.8182 | 2700 | 0.0011 | - |
+| 1.8519 | 2750 | 0.0014 | - |
+| 1.8855 | 2800 | 0.0012 | - |
+| 1.9192 | 2850 | 0.0012 | - |
+| 1.9529 | 2900 | 0.0009 | - |
+| 1.9865 | 2950 | 0.001 | - |
+
+### Framework Versions
+- Python: 3.10.12
+- SetFit: 1.0.3
+- Sentence Transformers: 2.2.2
+- Transformers: 4.35.2
+- PyTorch: 2.1.0+cu121
+- Datasets: 2.16.1
+- Tokenizers: 0.15.1
+
+## Citation
+
+### BibTeX
+```bibtex
+@article{https://doi.org/10.48550/arxiv.2209.11055,
+    doi = {10.48550/ARXIV.2209.11055},
+    url = {https://arxiv.org/abs/2209.11055},
+    author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},
+    keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
+    title = {Efficient Few-Shot Learning Without Prompts},
+    publisher = {arXiv},
+    year = {2022},
+    copyright = {Creative Commons Attribution 4.0 International}
+}
+```
+
+<!--
+## Glossary
+
+*Clearly define terms in order to be accessible across audiences.*
+-->
+
+<!--
+## Model Card Authors
+
+*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
+-->
+
+<!--
+## Model Card Contact
+
+*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
+-->
config.json
CHANGED
@@ -1,35 +1,31 @@
 {
-  "_name_or_path": "
-  "activation": "gelu",
+  "_name_or_path": "/root/.cache/torch/sentence_transformers/BAAI_bge-small-en-v1.5/",
   "architectures": [
-    "
+    "BertModel"
   ],
-  "
-  "
-  "
-  "
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 384,
   "id2label": {
-    "0": "LABEL_0",
-    "1": "LABEL_1",
-    "2": "LABEL_2"
+    "0": "LABEL_0"
   },
   "initializer_range": 0.02,
+  "intermediate_size": 1536,
   "label2id": {
-    "LABEL_0": 0,
-    "LABEL_1": 1,
-    "LABEL_2": 2
+    "LABEL_0": 0
   },
+  "layer_norm_eps": 1e-12,
   "max_position_embeddings": 512,
-  "model_type": "
-  "
-  "
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
   "pad_token_id": 0,
-  "
-  "qa_dropout": 0.1,
-  "seq_classif_dropout": 0.2,
-  "sinusoidal_pos_embds": false,
-  "tie_weights_": true,
+  "position_embedding_type": "absolute",
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.35.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
   "vocab_size": 30522
 }
config_sentence_transformers.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "__version__": {
+    "sentence_transformers": "2.2.2",
+    "transformers": "4.28.1",
+    "pytorch": "1.13.0+cu117"
+  }
+}
config_setfit.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "normalize_embeddings": false,
+  "labels": [
+    "center",
+    "left",
+    "right"
+  ]
+}
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:a81bd8924fa00d82337cceb9a0c535bc7271d064c81d2d9105d506b6050a4ac2
+size 133462128
model_head.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a34c5cf85c7314e1103b5e1d923ef70f5f27a9866401f60d9b5a3f5b808f2605
+size 10143
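`model_head.pkl` is the pickled scikit-learn `LogisticRegression` head. `SetFitModel.from_pretrained` loads it automatically, but it can also be inspected directly; a sketch, assuming a local git-lfs clone of the repo and that the head was serialized with joblib (as setfit does for scikit-learn heads):

```python
import joblib

# Assumes the repository was cloned locally with git-lfs so the real file is present
head = joblib.load("model_head.pkl")
print(type(head))     # expected: sklearn.linear_model.LogisticRegression
print(head.classes_)  # the three class labels
```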
modules.json
ADDED
@@ -0,0 +1,20 @@
+[
+  {
+    "idx": 0,
+    "name": "0",
+    "path": "",
+    "type": "sentence_transformers.models.Transformer"
+  },
+  {
+    "idx": 1,
+    "name": "1",
+    "path": "1_Pooling",
+    "type": "sentence_transformers.models.Pooling"
+  },
+  {
+    "idx": 2,
+    "name": "2",
+    "path": "2_Normalize",
+    "type": "sentence_transformers.models.Normalize"
+  }
+]
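`modules.json` declares the embedding body as a three-stage sentence-transformers pipeline: Transformer → Pooling (CLS, per `1_Pooling/config.json`) → Normalize. Loading the repo with `SentenceTransformer` or `SetFitModel` assembles this automatically; a sketch of the equivalent pipeline built by hand, for illustration only:

```python
from sentence_transformers import SentenceTransformer, models

word = models.Transformer("BAAI/bge-small-en-v1.5", max_seq_length=512)
pooling = models.Pooling(word.get_word_embedding_dimension(), pooling_mode="cls")
normalize = models.Normalize()  # unit-norm embeddings, so dot product equals cosine similarity

body = SentenceTransformer(modules=[word, pooling, normalize])
embeddings = body.encode(["a sentence to embed"])  # shape: (1, 384)
```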
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
+{
+  "max_seq_length": 512,
+  "do_lower_case": true
+}
tokenizer.json
CHANGED
@@ -2,14 +2,12 @@
   "version": "1.0",
   "truncation": {
     "direction": "Right",
-    "max_length":
+    "max_length": 512,
     "strategy": "LongestFirst",
     "stride": 0
   },
   "padding": {
-    "strategy": {
-      "Fixed": 128
-    },
+    "strategy": "BatchLongest",
     "direction": "Right",
     "pad_to_multiple_of": null,
     "pad_id": 0,
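This change is behavioral: the old tokenizer padded every batch to a fixed 128 tokens, while the new one pads only to the longest sequence in the batch and truncates at 512. A sketch of the two modes with the `tokenizers` library, assuming a local copy of `tokenizer.json`:

```python
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")

# Old behavior: always pad to a fixed length of 128
tok.enable_padding(length=128)

# New behavior: pad to the longest sequence in the batch, truncate at 512
tok.enable_padding()  # length=None -> pad to the batch's longest sequence
tok.enable_truncation(max_length=512)

batch = tok.encode_batch(["short", "a somewhat longer input sentence"])
print([len(e.ids) for e in batch])  # both padded to the longer encoding's length
```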
tokenizer_config.json
CHANGED
@@ -43,13 +43,15 @@
   },
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
   "do_lower_case": true,
   "mask_token": "[MASK]",
   "model_max_length": 512,
+  "never_split": null,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
   "strip_accents": null,
   "tokenize_chinese_chars": true,
-  "tokenizer_class": "
+  "tokenizer_class": "BertTokenizer",
   "unk_token": "[UNK]"
 }