Upload folder using huggingface_hub
- 1_Pooling/config.json +10 -0
- README.md +196 -0
- config.json +32 -0
- config_sentence_transformers.json +10 -0
- config_setfit.json +4 -0
- model.safetensors +3 -0
- model_head.pkl +3 -0
- modules.json +20 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +37 -0
- tokenizer.json +0 -0
- tokenizer_config.json +57 -0
- vocab.txt +0 -0
1_Pooling/config.json
ADDED
@@ -0,0 +1,10 @@
{
  "word_embedding_dimension": 768,
  "pooling_mode_cls_token": true,
  "pooling_mode_mean_tokens": false,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false,
  "pooling_mode_weightedmean_tokens": false,
  "pooling_mode_lasttoken": false,
  "include_prompt": true
}
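For context, `pooling_mode_cls_token: true` means the sentence embedding is simply the final hidden state of the `[CLS]` token, at the 768-dimensional width declared above. A minimal sketch of that pooling step, assuming the `transformers` library and the base model named in the README below:

```python
# Sketch of CLS-token pooling as configured in 1_Pooling/config.json
# (pooling_mode_cls_token: true, word_embedding_dimension: 768).
import torch
from transformers import AutoModel, AutoTokenizer

name = "avsolatorio/GIST-Embedding-v0"  # base model from the README below
tokenizer = AutoTokenizer.from_pretrained(name)
encoder = AutoModel.from_pretrained(name)

batch = tokenizer(["An example sentence."], return_tensors="pt")
with torch.no_grad():
    hidden = encoder(**batch).last_hidden_state  # (batch, seq_len, 768)
cls_embedding = hidden[:, 0]                     # CLS pooling: token at position 0
print(cls_embedding.shape)                       # torch.Size([1, 768])
```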
README.md
ADDED
@@ -0,0 +1,196 @@
---
library_name: setfit
tags:
- setfit
- sentence-transformers
- text-classification
- generated_from_setfit_trainer
base_model: avsolatorio/GIST-Embedding-v0
metrics:
- accuracy
widget:
- text: The project is focused on developing a new employee benefits package designed
    to attract and retain top talent. We will conduct competitive benchmarking to
    understand industry standards, gather employee feedback to identify desired benefits,
    and create a comprehensive package that includes health, wellness, and financial
    incentives.
- text: A tire manufacturing company created a new belt to be used as part of tread
    cooling during the manufacturing process. Such a belt is not commercially available.
- text: Covers tasks related to data quality and compliance. This includes handling
    data errors, updating data catalog definitions, and implementing compliance updates.
    The project aims to ensure the accuracy, completeness, and compliance of the company's
    data, thereby increasing its reliability and trustworthiness.
- text: Involves the development, testing, and maintenance of the Huntress agent software.
    This includes fixing bugs, improving error handling, and adding new functionalities.
    The project ensures the agent software is reliable and effective in protecting
    customer systems.
- text: This project involved integrating an off-the-shelf software program into the
    company's existing software infrastructure with the goal of improving the customer
    data allocation and retention processes. Designing and developing the integrations
    needed to successfully launch the program within the company's existing software
    architecture required the Python programming language. This development required
    significant iterative testing by the development team because Python had never
    been used to integrate applications within the company's platform previously.
pipeline_tag: text-classification
inference: true
---

# SetFit with avsolatorio/GIST-Embedding-v0

This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [avsolatorio/GIST-Embedding-v0](https://huggingface.co/avsolatorio/GIST-Embedding-v0) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification.

The model has been trained using an efficient few-shot learning technique that involves:

1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.
2. Training a classification head with features from the fine-tuned Sentence Transformer.

## Model Details

### Model Description
- **Model Type:** SetFit
- **Sentence Transformer body:** [avsolatorio/GIST-Embedding-v0](https://huggingface.co/avsolatorio/GIST-Embedding-v0)
- **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance
- **Maximum Sequence Length:** 512 tokens
- **Number of Classes:** 2 classes
<!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) -->
<!-- - **Language:** Unknown -->
<!-- - **License:** Unknown -->

### Model Sources

- **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit)
- **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055)
- **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit)

### Model Labels
| Label | Examples |
|:------|:---------|
| 0 | <ul><li>"A manufacturing corporation undertakes an initiative to restructure its manufacturing organization by designing an organizational structure that will improve the company's business operations"</li><li>"Centers on the production of content for the Brief product. This includes tasks related to drafting insights, creating case studies, and publishing social media posts. The project aims to provide valuable and timely information to Kharon's clients, helping them stay informed about global security topics that impact their commercial activities."</li><li>'The team is developing a comprehensive marketing strategy to increase brand awareness and customer engagement. This includes creating targeted advertising campaigns, optimizing our social media presence, and collaborating with influencers to promote our products. We will also analyze market trends and consumer behavior to refine our approach.'</li></ul> |
| 1 | <ul><li>"Project focused on enhancing the website's functionality, including tasks related to optimizing search functionality and integrating new features such as bookmarks and table of contents for the web reader. The project aims to provide a seamless online experience for customers by improving the efficiency and speed of our website."</li><li>'Design and create an innovative drug delivery system for cancer treatment compatible with different types of cancer and different patient profiles while minimizing negative impacts on healthy tissues'</li><li>'Develop a new and advanced Natural Language Processing (NLP) algorithm to enhance the capabilities of virtual assistants used in various applications, such as customer service chatbots. This project involved improving the NLP algorithm to be more responsive in the area of complex natural language understanding, including context comprehension, sentiment analysis, and accurate response generation'</li></ul> |

## Uses

### Direct Use for Inference

First install the SetFit library:

```bash
pip install setfit
```

Then you can load this model and run inference.

```python
from setfit import SetFitModel

# Download from the 🤗 Hub
model = SetFitModel.from_pretrained("setfit_model_id")
# Run inference
preds = model("A tire manufacturing company created a new belt to be used as part of tread cooling during the manufacturing process. Such a belt is not commercially available.")
```
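The snippet above classifies a single string; `SetFitModel` also accepts a list of texts. A short sketch, keeping the same `"setfit_model_id"` placeholder and assuming SetFit 1.0's `predict_proba` helper for class probabilities:

```python
from setfit import SetFitModel

model = SetFitModel.from_pretrained("setfit_model_id")  # placeholder id, as above

texts = [
    "Design and create an innovative drug delivery system for cancer treatment.",
    "The team is developing a comprehensive marketing strategy to increase brand awareness.",
]
preds = model(texts)                # one 0/1 label per input text
probs = model.predict_proba(texts)  # class probabilities from the LogisticRegression head
```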
<!--
### Downstream Use

*List how someone could finetune this model on their own dataset.*
-->

<!--
### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Set Metrics
| Training set | Min | Median | Max |
|:-------------|:----|:-------|:----|
| Word count | 23 | 43.5 | 54 |

| Label | Training Sample Count |
|:------|:----------------------|
| 0 | 8 |
| 1 | 16 |

### Training Hyperparameters
- batch_size: (16, 16)
- num_epochs: (3, 3)
- max_steps: -1
- sampling_strategy: oversampling
- num_iterations: 20
- body_learning_rate: (0.0001, 0.0001)
- head_learning_rate: 0.0001
- loss: CosineSimilarityLoss
- distance_metric: cosine_distance
- margin: 0.25
- end_to_end: False
- use_amp: False
- warmup_proportion: 0.1
- seed: 42
- eval_max_steps: -1
- load_best_model_at_end: False
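These fields mirror setfit's `TrainingArguments` one-to-one. As a hedged sketch of how a comparable run could be reproduced with SetFit 1.0.x (the `train_dataset` below is a stand-in; the actual 24 training examples are not part of this upload, and `CosineSimilarityLoss` is already the default loss):

```python
# Sketch: reproducing this training configuration with SetFit 1.0.x.
from datasets import Dataset
from setfit import SetFitModel, Trainer, TrainingArguments

# Stand-in dataset with the "text"/"label" columns SetFit expects.
train_dataset = Dataset.from_dict({
    "text": ["example project description ...", "another project description ..."],
    "label": [0, 1],
})

model = SetFitModel.from_pretrained("avsolatorio/GIST-Embedding-v0")

args = TrainingArguments(
    batch_size=(16, 16),               # (embedding phase, classifier phase)
    num_epochs=(3, 3),
    num_iterations=20,
    body_learning_rate=(1e-4, 1e-4),
    head_learning_rate=1e-4,
    sampling_strategy="oversampling",
    warmup_proportion=0.1,
    seed=42,
)

trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
trainer.train()
model.save_pretrained("my-setfit-model")  # writes files like those in this commit
```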
### Training Results
| Epoch | Step | Training Loss | Validation Loss |
|:------:|:----:|:-------------:|:---------------:|
| 0.0167 | 1 | 0.2764 | - |
| 0.8333 | 50 | 0.0014 | - |
| 1.6667 | 100 | 0.0011 | - |
| 2.5 | 150 | 0.0011 | - |

### Framework Versions
- Python: 3.9.16
- SetFit: 1.0.3
- Sentence Transformers: 3.0.1
- Transformers: 4.39.0
- PyTorch: 2.3.1
- Datasets: 2.19.2
- Tokenizers: 0.15.2

## Citation

### BibTeX
```bibtex
@article{https://doi.org/10.48550/arxiv.2209.11055,
    doi = {10.48550/ARXIV.2209.11055},
    url = {https://arxiv.org/abs/2209.11055},
    author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},
    keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
    title = {Efficient Few-Shot Learning Without Prompts},
    publisher = {arXiv},
    year = {2022},
    copyright = {Creative Commons Attribution 4.0 International}
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
config.json
ADDED
@@ -0,0 +1,32 @@
{
  "_name_or_path": "avsolatorio/GIST-Embedding-v0",
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "LABEL_0"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "LABEL_0": 0
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.39.0",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 30522
}
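This is a standard 12-layer BERT-base encoder. A quick, hedged way to confirm the key dimensions with `transformers`' `AutoConfig` (`"setfit_model_id"` is again a placeholder for this repository's Hub id):

```python
# Sketch: inspecting the backbone config above with AutoConfig.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("setfit_model_id")  # placeholder id
print(cfg.model_type)               # "bert"
print(cfg.hidden_size)              # 768
print(cfg.num_hidden_layers)        # 12
print(cfg.max_position_embeddings)  # 512, matching the 512-token limit above
```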
config_sentence_transformers.json
ADDED
@@ -0,0 +1,10 @@
{
  "__version__": {
    "sentence_transformers": "3.0.1",
    "transformers": "4.39.0",
    "pytorch": "2.3.1"
  },
  "prompts": {},
  "default_prompt_name": null,
  "similarity_fn_name": null
}
config_setfit.json
ADDED
@@ -0,0 +1,4 @@
{
  "normalize_embeddings": false,
  "labels": null
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f2e300fffb3d173f7978b3daa91848856ac9dccad833fa9145f6e421b7a182f7
size 437951328
model_head.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:35004dff48472d1b4b3c7d94f5f75fe1ea4c394640cec0262b715139cae6c1ff
size 7007
modules.json
ADDED
@@ -0,0 +1,20 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  },
  {
    "idx": 2,
    "name": "2",
    "path": "2_Normalize",
    "type": "sentence_transformers.models.Normalize"
  }
]
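The module pipeline (Transformer → Pooling → Normalize) means the body's `encode()` returns L2-normalized CLS embeddings. A small sketch to verify, assuming the model is loaded via SetFit as in the README (`"setfit_model_id"` remains a placeholder):

```python
# The Normalize module at idx 2 L2-normalizes the pooled CLS embedding,
# so every sentence vector has unit length.
import numpy as np
from setfit import SetFitModel

model = SetFitModel.from_pretrained("setfit_model_id")  # placeholder id
emb = model.model_body.encode(["A short test sentence."])  # model_body is the SentenceTransformer
print(emb.shape)               # (1, 768), per 1_Pooling/config.json
print(np.linalg.norm(emb[0]))  # ~1.0, thanks to 2_Normalize
```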
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 512,
  "do_lower_case": true
}
special_tokens_map.json
ADDED
@@ -0,0 +1,37 @@
{
  "cls_token": {
    "content": "[CLS]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "[MASK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "[PAD]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "[SEP]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "[UNK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,57 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "101": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "102": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "103": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": true,
  "mask_token": "[MASK]",
  "model_max_length": 512,
  "never_split": null,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}
vocab.txt
ADDED
The diff for this file is too large to render.
See raw diff