Doron Adler committed
Commit aa440ce • 1 Parent(s): 2800edf

Hebrew Poetry - GPT Neo (XL)

Files changed (4)
  1. README.md +5 -5
  2. app.py +124 -0
  3. requirements.txt +4 -0
  4. start.sh +10 -0
README.md CHANGED
@@ -1,11 +1,11 @@
  ---
- title: Hebrew Poetry_GPT Neo XL
- emoji: 📈
- colorFrom: green
- colorTo: green
+ title: Hebrew Poetry - GPT Neo (XL)
+ emoji: 🌵
+ colorFrom: blue
+ colorTo: gray
  sdk: streamlit
  app_file: app.py
- pinned: false
+ pinned: true
  ---

  # Configuration
app.py ADDED
@@ -0,0 +1,124 @@
+ # -*- coding: utf-8 -*-
+
+ import os
+
+ import numpy as np
+ import streamlit as st
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+
+ @st.cache(allow_output_mutation=True)
+ def load_model(model_name):
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModelForCausalLM.from_pretrained(model_name)
+     return model, tokenizer
+
+
+ def extend(input_text, max_size=20, top_k=50, top_p=0.95):
+     # Relies on the globals defined in the __main__ block below:
+     # model, tokenizer, device, stop_token, new_lines.
+     if len(input_text) == 0:
+         # Fallback prompt: "שם היצירה: " ("Title of the piece: ")
+         input_text = "שם היצירה: "
+
+     encoded_prompt = tokenizer.encode(
+         input_text, add_special_tokens=False, return_tensors="pt")
+     encoded_prompt = encoded_prompt.to(device)
+
+     if encoded_prompt.size()[-1] == 0:
+         input_ids = None
+     else:
+         input_ids = encoded_prompt
+
+     output_sequences = model.generate(
+         input_ids=input_ids,
+         max_length=max_size + len(encoded_prompt[0]),
+         top_k=top_k,
+         top_p=top_p,
+         do_sample=True,
+         num_return_sequences=1)
+
+     # Remove the batch dimension when returning multiple sequences
+     if len(output_sequences.shape) > 2:
+         output_sequences.squeeze_()
+
+     generated_sequences = []
+
+     for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
+         generated_sequence = generated_sequence.tolist()
+
+         # Decode text
+         text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
+
+         # Remove all text after the stop token
+         text = text[: text.find(stop_token) if stop_token else None]
+
+         # Remove all text after three consecutive newlines
+         text = text[: text.find(new_lines) if new_lines else None]
+
+         # Prepend the original prompt and drop the decoded copy of it
+         total_sequence = (
+             input_text + text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)):]
+         )
+         generated_sequences.append(total_sequence)
+
+     parsed_text = total_sequence.replace("<|startoftext|>", "").replace("\r", "").replace("\n\n", "\n")
+     if len(parsed_text) == 0:
+         parsed_text = "שגיאה"  # "Error"
+     return parsed_text
+
+
+ if __name__ == "__main__":
+     st.title("Hebrew Poetry - GPT Neo (XL)")
+
+     model, tokenizer = load_model("Norod78/hebrew-gpt_neo-xl-poetry")
+     #model, tokenizer = load_model("Norod78/hebrew_poetry-gpt_neo-tiny")
+
+     stop_token = "<|endoftext|>"
+     new_lines = "\n\n\n"
+
+     np.random.seed(None)
+     random_seed = int(np.random.randint(10000))  # torch.manual_seed expects a plain int
+
+     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+     n_gpu = torch.cuda.device_count() if torch.cuda.is_available() else 0
+
+     torch.manual_seed(random_seed)
+     if n_gpu > 0:
+         torch.cuda.manual_seed_all(random_seed)
+
+     model.to(device)
+
+     st.sidebar.subheader("Configurable parameters")
+
+     max_len = st.sidebar.slider("Max-Length", 0, 512, 128, help="The maximum length of the sequence to be generated.")
+     top_k = st.sidebar.slider("Top-K", 0, 100, 50, help="The number of highest-probability vocabulary tokens to keep for top-k filtering.")
+     top_p = st.sidebar.slider("Top-P", 0.0, 1.0, 0.95, help="If set to a float < 1, only the most probable tokens whose probabilities add up to top_p or higher are kept for generation.")
+
+     st.markdown(
+         """Hebrew poetry text generation model based on EleutherAI's gpt-neo. The model was trained on a TPUv3-8, which was made available to me via the [TPU Research Cloud Program](https://sites.research.google/trc/). """
+     )
+
+     # Default prompt: "The last man in the world sat alone in his room, when suddenly there was a knock."
+     prompt = "האיש האחרון בעולם ישב לבד בחדרו כשלפתע נשמע נקישה"
+     text = st.text_area("Enter text", prompt)
+
+     if st.button("Run"):
+         with st.spinner(text="Generating results..."):
+             st.subheader("Result")
+             print(f"max_len:{max_len}, top_k:{top_k}, top_p:{top_p}")
+             result = extend(input_text=text,
+                             max_size=int(max_len),
+                             top_k=int(top_k),
+                             top_p=float(top_p))
+             print("result:", result)
+             # Wrap the result in a right-to-left paragraph so the Hebrew renders correctly
+             st.markdown("<p dir=\"rtl\" style=\"text-align:right;\">", unsafe_allow_html=True)
+             st.markdown(f" {result} ")
+             #st.write(result)
+             st.markdown("</p>", unsafe_allow_html=True)
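
To try the same checkpoint outside Streamlit, here is a minimal sketch that mirrors the generation call above, using the same model id and the app's default sampling parameters; it adds nothing beyond what app.py already does, and note that downloading the XL checkpoint is several GB.

```python
# Minimal sketch: sample from the checkpoint the Space loads,
# with the app's defaults (do_sample, top_k=50, top_p=0.95).
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "Norod78/hebrew-gpt_neo-xl-poetry"  # as in app.py
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

prompt = "שם היצירה: "  # "Title of the piece: " -- the app's fallback prompt
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)

output = model.generate(
    input_ids=input_ids,
    max_length=input_ids.shape[-1] + 128,  # prompt length + the app's default Max-Length
    do_sample=True,                        # enable sampling instead of greedy decoding
    top_k=50,
    top_p=0.95,
    num_return_sequences=1,
)
print(tokenizer.decode(output[0], clean_up_tokenization_spaces=True))
```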
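The Top-P slider's help text describes nucleus sampling; as an illustrative sketch of the filtering rule it refers to, here it is applied to a toy next-token distribution (the probabilities are made up, not taken from the model):

```python
import torch

# Toy next-token distribution over a 5-token vocabulary (made-up numbers).
probs = torch.tensor([0.50, 0.25, 0.15, 0.07, 0.03])

# Nucleus (top-p) filtering with top_p = 0.95: keep the smallest set of
# tokens whose cumulative probability reaches 0.95, then renormalize.
top_p = 0.95
sorted_probs, sorted_idx = torch.sort(probs, descending=True)
cumulative = torch.cumsum(sorted_probs, dim=0)
# Keep each token whose cumulative probability *excluding itself* is still
# below top_p, i.e. everything up to the first token that crosses the threshold.
keep = cumulative - sorted_probs < top_p
kept_idx = sorted_idx[keep]                  # tokens 0..3 survive; token 4 (p=0.03) is dropped
kept_probs = sorted_probs[keep] / sorted_probs[keep].sum()
print(kept_idx.tolist(), kept_probs.tolist())
```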
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ streamlit==0.80.0
+ transformers
+ tokenizers
+ torch
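
A quick sanity check, not part of the Space itself, that the environment resolves after `pip install -r requirements.txt`:

```python
# Confirm the four dependencies import cleanly and report their versions.
import streamlit
import tokenizers
import torch
import transformers

print("streamlit   :", streamlit.__version__)  # expected 0.80.0, per the pin above
print("transformers:", transformers.__version__)
print("tokenizers  :", tokenizers.__version__)
print("torch       :", torch.__version__)
```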
start.sh ADDED
@@ -0,0 +1,10 @@
+ #!/usr/bin/env bash
+ set -e
+
+ if [ "$DEBUG" = true ] ; then
+     echo 'Debugging - ON'
+     nodemon --exec streamlit run app.py
+ else
+     echo 'Debugging - OFF'
+     streamlit run app.py
+ fi