ThomasBlumet commited on
Commit
5362bf1
·
1 Parent(s): 6ffae36

change the model used

Browse files
Files changed (5) hide show
  1. .dockerignore +27 -0
  2. .vscode/launch.json +19 -0
  3. .vscode/tasks.json +26 -0
  4. Dockerfile +21 -44
  5. app.py +63 -50
.dockerignore ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ **/__pycache__
2
+ **/.venv
3
+ **/.classpath
4
+ **/.dockerignore
5
+ **/.env
6
+ **/.git
7
+ **/.gitignore
8
+ **/.project
9
+ **/.settings
10
+ **/.toolstarget
11
+ **/.vs
12
+ **/.vscode
13
+ **/*.*proj.user
14
+ **/*.dbmdl
15
+ **/*.jfm
16
+ **/bin
17
+ **/charts
18
+ **/docker-compose*
19
+ **/compose*
20
+ **/Dockerfile*
21
+ **/node_modules
22
+ **/npm-debug.log
23
+ **/obj
24
+ **/secrets.dev.yaml
25
+ **/values.dev.yaml
26
+ LICENSE
27
+ README.md
.vscode/launch.json ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "configurations": [
3
+ {
4
+ "name": "Docker: Python - General",
5
+ "type": "docker",
6
+ "request": "launch",
7
+ "preLaunchTask": "docker-run: debug",
8
+ "python": {
9
+ "pathMappings": [
10
+ {
11
+ "localRoot": "${workspaceFolder}",
12
+ "remoteRoot": "/app"
13
+ }
14
+ ],
15
+ "projectType": "general"
16
+ }
17
+ }
18
+ ]
19
+ }
.vscode/tasks.json ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "2.0.0",
3
+ "tasks": [
4
+ {
5
+ "type": "docker-build",
6
+ "label": "docker-build",
7
+ "platform": "python",
8
+ "dockerBuild": {
9
+ "tag": "storygenerationdocker:latest",
10
+ "dockerfile": "${workspaceFolder}/Dockerfile",
11
+ "context": "${workspaceFolder}",
12
+ "pull": true
13
+ }
14
+ },
15
+ {
16
+ "type": "docker-run",
17
+ "label": "docker-run: debug",
18
+ "dependsOn": [
19
+ "docker-build"
20
+ ],
21
+ "python": {
22
+ "file": "app.py"
23
+ }
24
+ }
25
+ ]
26
+ }
Dockerfile CHANGED
@@ -1,51 +1,28 @@
1
- # # For more information, please refer to https://aka.ms/vscode-docker-python
2
- # FROM python:3.9
3
-
4
- # # Where we'll copy the code
5
- # WORKDIR /code
6
-
7
- # # Copy the current directory contents into the container at /code
8
- # COPY ./requirements.txt /code/requirements.txt
9
- # # Install pip requirements
10
- # RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
11
-
12
- # # Creates a non-root user with an explicit UID
13
- # # For more info, please refer to https://aka.ms/vscode-docker-python-configure-containers
14
- # RUN useradd -m -u 1000 user
15
- # USER user
16
-
17
- # # Set home to the user's home directory
18
- # ENV HOME=/home/user \
19
- # PATH=/home/user/.local/bin:$PATH
20
-
21
- # # Set the working directory to the user's home directory
22
- # WORKDIR $HOME/app
23
-
24
- # # Copy the current directory contents into the container at $HOME/app setting the owner to the user
25
- # COPY --chown=user . $HOME/app
26
-
27
- # # During debugging, this entry point will be overridden. For more information, please refer to https://aka.ms/vscode-docker-python-debug
28
- # CMD ["uvicorn","app:app", "--host", "0.0.0.0", "--port", "7860"]
29
-
30
-
31
- # read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
32
- # you will also find guides on how best to write your Dockerfile
33
-
34
  FROM python:3.9
35
 
36
- # The two following lines are requirements for the Dev Mode to be functional
37
- # Learn more about the Dev Mode at https://huggingface.co/dev-mode-explorers
38
- RUN useradd -m -u 1000 user
39
- WORKDIR /app
40
-
41
- COPY --chown=user ./requirements.txt requirements.txt
42
- RUN pip install --no-cache-dir --upgrade -r requirements.txt
43
 
44
- COPY --chown=user . /app
 
 
 
45
 
 
 
 
46
  USER user
47
 
48
- ENV HOME=/home/user \
49
- PATH=/home/user/.local/bin:$PATH
 
 
 
 
 
 
 
50
 
51
- CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
 
1
+ # For more information, please refer to https://aka.ms/vscode-docker-python
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  FROM python:3.9
3
 
4
+ # Where we'll copy the code
5
+ WORKDIR /code
 
 
 
 
 
6
 
7
+ # Copy the current directory contents into the container at /code
8
+ COPY ./requirements.txt /code/requirements.txt
9
+ # Install pip requirements
10
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
11
 
12
+ # Creates a non-root user with an explicit UID
13
+ # For more info, please refer to https://aka.ms/vscode-docker-python-configure-containers
14
+ RUN useradd -m -u 1000 user
15
  USER user
16
 
17
+ # Set home to the user's home directory
18
+ ENV HOME=/home/user \
19
+ PATH=/home/user/.local/bin:$PATH
20
+
21
+ # Set the working directory to the user's home directory
22
+ WORKDIR $HOME/app
23
+
24
+ # Copy the current directory contents into the container at $HOME/app setting the owner to the user
25
+ COPY --chown=user . $HOME/app
26
 
27
+ # During debugging, this entry point will be overridden. For more information, please refer to https://aka.ms/vscode-docker-python-debug
28
+ CMD ["uvicorn","app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py CHANGED
@@ -1,58 +1,71 @@
1
- from transformers import BartForConditionalGeneration, BartTokenizer
2
  from transformers.utils import logging
3
  import gradio as gr
4
 
5
  #define the logger instance
6
  logger = logging.get_logger("transformers")
7
 
8
- # Charger le modèle BART et le tokenizer
9
- model_name = "facebook/bart-large-cnn"
10
- tokenizer = BartTokenizer.from_pretrained(model_name)
11
- model = BartForConditionalGeneration.from_pretrained(model_name)
12
-
13
- # Fonction pour générer du texte
14
- def generate_text(prompt):
15
- inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
16
- summary_ids = model.generate(inputs["input_ids"], max_length=150, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True)
17
- return tokenizer.decode(summary_ids[0], skip_special_tokens=True)
18
-
19
- #for training the model after the data is collected
20
- #model.save_pretrained("model")
21
- #tokenizer.save_pretrained("model")
22
-
23
- #for the app functions
24
- def clear_save_textbox(message):
25
- return " ", message
26
-
27
- def show_input_text(message,history:list[tuple[str,str]]):
28
- history.append((message,""))
29
- story = generate_text(message)
30
- history[-1] = (message,story)
31
- return history
32
-
33
- def delete_previous_text(history:list[tuple[str,str]]):
34
- try:
35
- message, _ = history.pop()
36
- except IndexError:
37
- message = " "
38
- return history, message
39
-
40
- # Créer une interface de saisie avec Gradio
41
- interface = gr.Interface(fn=generate_text, inputs="text", outputs="text",title="TeLLMyStory",description="Enter your story idea and the model will generate the story based on it.")
42
- with gr.Blocks() as demo:
43
- gr.Markdown("TeLLMyStory chatbot")
44
- #input_text = blocks.text(name="input_text", label="Enter your story idea here", default="Once upon a time, there was")
45
- with gr.Row():
46
- input_text = gr.Textbox(label="Enter your story idea here")
47
- #clear_button = gr.Button("Clear",variant="secondary")
48
- #clear_button.click(fn=clear_save_textbox, inputs=[input_text])
49
- #retry_button = gr.Button("Retry", fn=delete_previous_text, inputs=[input_text],variants=["secondary"])
50
-
51
- with gr.Row():
52
- gr.Markdown("History of your story ideas")
53
- gen_story = gr.Textbox(label="History")
 
 
 
 
 
 
 
 
 
 
 
 
 
54
 
55
- #send_button = gr.Button(name="send_button", label="Send", fn=show_input_text, inputs=[input_text],outputs=[gen_story],variants=["primary"])
56
 
57
- # Lancer l'interface
58
- interface.launch()
 
1
+ from transformers import BartForConditionalGeneration, BartTokenizer, pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
2
  from transformers.utils import logging
3
  import gradio as gr
4
 
5
  #define the logger instance
6
  logger = logging.get_logger("transformers")
7
 
8
+ #other text-to-text model
9
+ chatbot = pipeline("text2text-generation", model="google/flan-t5-small") #model= "gpt2"
10
+ def respond(prompt):
11
+ result = chatbot(prompt, max_length=50, num_return_sequences=1)
12
+ return result[0]['generated_text']
13
+ interface = gr.Interface(fn=respond, inputs="text", outputs="text")
14
+ interface.launch()
15
+
16
+ #load the model
17
+ model_name = "google/flan-t5-small"
18
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
19
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
20
+
21
+ # # Charger le modèle BART et le tokenizer
22
+ # model_name = "facebook/bart-large-cnn"
23
+ # tokenizer = BartTokenizer.from_pretrained(model_name)
24
+ # model = BartForConditionalGeneration.from_pretrained(model_name)
25
+
26
+ # # Fonction pour générer du texte
27
+ # def generate_text(prompt):
28
+ # inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
29
+ # summary_ids = model.generate(inputs["input_ids"], max_length=150, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True)
30
+ # return tokenizer.decode(summary_ids[0], skip_special_tokens=True)
31
+
32
+ # #for training the model after the data is collected
33
+ # #model.save_pretrained("model")
34
+ # #tokenizer.save_pretrained("model")
35
+
36
+ # #for the app functions
37
+ # def clear_save_textbox(message):
38
+ # return " ", message
39
+
40
+ # def show_input_text(message,history:list[tuple[str,str]]):
41
+ # history.append((message,""))
42
+ # story = generate_text(message)
43
+ # history[-1] = (message,story)
44
+ # return history
45
+
46
+ # def delete_previous_text(history:list[tuple[str,str]]):
47
+ # try:
48
+ # message, _ = history.pop()
49
+ # except IndexError:
50
+ # message = " "
51
+ # return history, message
52
+
53
+ # # Créer une interface de saisie avec Gradio
54
+ # interface = gr.Interface(fn=generate_text, inputs="text", outputs="text",title="TeLLMyStory",description="Enter your story idea and the model will generate the story based on it.")
55
+ # with gr.Blocks() as demo:
56
+ # gr.Markdown("TeLLMyStory chatbot")
57
+ # #input_text = blocks.text(name="input_text", label="Enter your story idea here", default="Once upon a time, there was")
58
+ # with gr.Row():
59
+ # input_text = gr.Textbox(label="Enter your story idea here")
60
+ # #clear_button = gr.Button("Clear",variant="secondary")
61
+ # #clear_button.click(fn=clear_save_textbox, inputs=[input_text])
62
+ # #retry_button = gr.Button("Retry", fn=delete_previous_text, inputs=[input_text],variants=["secondary"])
63
+
64
+ # with gr.Row():
65
+ # gr.Markdown("History of your story ideas")
66
+ # gen_story = gr.Textbox(label="History")
67
 
68
+ # #send_button = gr.Button(name="send_button", label="Send", fn=show_input_text, inputs=[input_text],outputs=[gen_story],variants=["primary"])
69
 
70
+ # # Lancer l'interface
71
+ # interface.launch()