Update app.py
app.py CHANGED
@@ -8,7 +8,6 @@ os.system("curl https://ollama.ai/install.sh | sh")
 import nest_asyncio
 nest_asyncio.apply()
 
-import os
 import asyncio
 
 # Run Async Ollama
@@ -41,7 +40,6 @@ async def run_process(cmd):
     # call it
     await asyncio.gather(pipe(process.stdout), pipe(process.stderr))
 
-import asyncio
 import threading
 
 async def start_ollama_serve():
@@ -85,7 +83,7 @@ gemma2 = Ollama(model=model_name, request_timeout=30.0)
 TITLE = "<h1><center>Chatbox</center></h1>"
 
 DESCRIPTION = f"""
-<h3>MODEL: <a href="https://hf.co/{
+<h3>MODEL: <a href="https://hf.co/{MODEL_ID}">{MODEL_NAME}</a></h3>
 <center>
 <p>Gemma is the large language model built by Google.
 <br>