Update README.md
Browse files
README.md
CHANGED
@@ -1,3 +1,142 @@
|
|
1 |
---
|
2 |
license: mit
|
3 |
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
---
|
2 |
license: mit
|
3 |
---
|
4 |
+
|
5 |
+
|
6 |
+
# Conversational Language Model Interface using FastText
|
7 |
+
|
8 |
+
This project provides a Command Line Interface (CLI) for interacting with a FastText language model, enabling users to generate text sequences based on their input. The script allows customization of parameters such as temperature, input text, top-k predictions, and model file path.
|
9 |
+
|
10 |
+
## Installation
|
11 |
+
|
12 |
+
Before running the script, ensure you have Python installed on your system. Additionally, you'll need to install the FastText library:
|
13 |
+
|
14 |
+
```bash
|
15 |
+
pip install fasttext
|
16 |
+
```
|
17 |
+
|
18 |
+
## Usage
|
19 |
+
|
20 |
+
To use the script, you should first obtain or train a FastText model. Place the model file (usually with a `.bin` extension) in a known directory.
|
21 |
+
|
22 |
+
The script can be executed with various command-line arguments to specify the behavior:
|
23 |
+
```python
|
24 |
+
import argparse
|
25 |
+
import fasttext
|
26 |
+
import numpy as np
|
27 |
+
|
28 |
+
def apply_repetition_penalty(labels, probabilities, used_labels, penalty_scale=1.9):
    """
    Applies a repetition penalty to reduce the probability of already used labels.

    :param labels: List of possible labels.
    :param probabilities: Corresponding probabilities. Any sequence of numbers
        is accepted (numpy array or plain list); it is converted to a float
        numpy array, so the caller's input is never mutated.
    :param used_labels: Set of labels that have already been used.
    :param penalty_scale: Scale of the penalty; each used label's probability
        is divided by this factor before renormalization.
    :return: Adjusted probabilities as a numpy array summing to 1.
    """
    # Float copy: avoids mutating the caller's data and makes list input work
    # (the original in-place ops require a numpy array).
    adjusted_probabilities = np.asarray(probabilities, dtype=float).copy()
    for i, label in enumerate(labels):
        if label in used_labels:
            adjusted_probabilities[i] /= penalty_scale
    # Normalize the probabilities to sum to 1 again
    adjusted_probabilities /= adjusted_probabilities.sum()
    return adjusted_probabilities
|
45 |
+
|
46 |
+
def predict_sequence(model, text, sequence_length=20, temperature=.5, penalty_scale=1.9):
    """
    Generates a sequence of labels using the FastText model with repetition penalty.

    :param model: Loaded FastText model.
    :param text: Initial text to start the prediction from.
    :param sequence_length: Desired length of the sequence.
    :param temperature: Temperature for sampling. Values < 1 sharpen the
        distribution (more conservative), values > 1 flatten it (more diverse).
        Non-positive values disable temperature scaling.
    :param penalty_scale: Scale of repetition penalty.
    :return: List of predicted labels.
    """
    used_labels = set()
    sequence = []

    for _ in range(sequence_length):
        # Predict the top k most probable labels
        labels, probabilities = model.predict(text, k=40)
        labels = [label.replace('__label__', '') for label in labels]
        probabilities = np.array(probabilities)

        # Fix: the temperature parameter was previously documented but never
        # applied. Standard temperature sampling: p_i <- p_i^(1/T), renormalized.
        if temperature > 0:
            probabilities = probabilities ** (1.0 / temperature)
            probabilities /= probabilities.sum()

        # Adjust the probabilities with repetition penalty
        probabilities = apply_repetition_penalty(labels, probabilities, used_labels, penalty_scale)

        # Sampling according to the adjusted probabilities
        label_index = np.random.choice(range(len(labels)), p=probabilities)
        chosen_label = labels[label_index]

        # Add the chosen label to the sequence and to the set of used labels
        sequence.append(chosen_label)
        used_labels.add(chosen_label)

        # Update the text with the chosen label for the next prediction
        text += ' ' + chosen_label

    return sequence
|
81 |
+
|
82 |
+
def generate_response(model, input_text, sequence_length=512, temperature=.5, penalty_scale=1.9):
    """Generate a response string by sampling a label sequence and joining it with spaces."""
    predicted_labels = predict_sequence(model, input_text, sequence_length, temperature, penalty_scale)
    return ' '.join(predicted_labels)
|
85 |
+
|
86 |
+
def main():
    """
    CLI entry point: parse arguments, load the FastText model, read the input
    text (from a file or directly from the command line), and print the
    generated response.
    """
    parser = argparse.ArgumentParser(description="Run the language model with specified parameters.")
    parser.add_argument('-t', '--temperature', type=float, default=0.5, help='Temperature for sampling.')
    parser.add_argument('-f', '--file', type=str, help='File containing input text.')
    parser.add_argument('-p', '--text', type=str, help='Direct input text.')
    # Fix: the previous help text ("length predictions to consider") described
    # top-k, but this flag actually sets the generated sequence length.
    parser.add_argument('-n', '--length', type=int, default=50, help='Length of the generated sequence.')
    parser.add_argument('-m', '--model', type=str, required=True, help='Address of the FastText model file.')

    args = parser.parse_args()

    # Load the model
    model = fasttext.load_model(args.model)

    input_text = ''
    if args.file:
        with open(args.file, 'r') as file:
            input_text = file.read()
    elif args.text:
        input_text = args.text
    else:
        print("No input text provided. Please use -f to specify a file or -p for direct text input.")
        return

    # "[RESPONSE]" marks where the model should continue generating;
    # presumably the model was trained with this separator — TODO confirm.
    response = generate_response(model, input_text + " [RESPONSE]", sequence_length=args.length, temperature=args.temperature)
    print("\nResponse:")
    print(response)


if __name__ == "__main__":
    main()
|
116 |
+
|
117 |
+
|
118 |
+
```
|
119 |
+
|
120 |
+
```bash
|
121 |
+
python conversation_app.py -t TEMPERATURE -f FILE -p TEXT -n LENGTH -m MODEL_PATH
|
122 |
+
```
|
123 |
+
|
124 |
+
- `-t TEMPERATURE` or `--temperature TEMPERATURE`: Sets the temperature for predictions. A higher temperature produces more diverse output. Default is 0.5.
|
125 |
+
- `-f FILE` or `--file FILE`: Specifies a path to a file containing input text. The script will read this file and use its contents as input.
|
126 |
+
- `-p TEXT` or `--text TEXT`: Directly provide the input text as a string.
|
127 |
+
- `-n LENGTH` or `--length LENGTH`: Sets the length (number of tokens) of the generated sequence. Default is 50.
|
128 |
+
- `-m MODEL_PATH` or `--model MODEL_PATH`: The path to the FastText model file (required).
|
129 |
+
|
130 |
+
### Example
|
131 |
+
|
132 |
+
```bash
|
133 |
+
python conversation_app.py -t 0.7 -p "What is the future of AI?" -n 40 -m /path/to/model.bin
|
134 |
+
```
|
135 |
+
|
136 |
+
This command sets the temperature to 0.7, uses the provided question as input, generates a sequence of 40 tokens, and specifies the model file path.
|
137 |
+
|
138 |
+
## Note
|
139 |
+
|
140 |
+
- The script's output depends on the quality and training of the FastText model used.
|
141 |
+
- Ensure the specified model file path and input file path (if used) are correct.
|
142 |
+
|