Commit 70960f4 (verified) by NithyasriVllB · 1 parent: a5f7405

Upload 3 files

Files changed (3):
  1. .gitattributes +35 -35
  2. README.md +13 -12
  3. app.py +75 -0
.gitattributes CHANGED
@@ -1,35 +1,35 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,12 +1,13 @@
- ---
- title: Fast Chatbot
- emoji: 🐢
- colorFrom: gray
- colorTo: gray
- sdk: gradio
- sdk_version: 5.4.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: Real Time Chat With AI
+ emoji:
+ colorFrom: blue
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 4.31.5
+ app_file: app.py
+ pinned: false
+ short_description: Chat with AI with ⚡Lightning Speed
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,75 @@
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+
+ # One client per model: Gemma serves the "Fast" tab, Mistral-Nemo the "Critical" tab.
+ # (Assigning both to the same `client` variable would overwrite the first client.)
+ gemma_client = InferenceClient("google/gemma-1.1-2b-it")
+ nemo_client = InferenceClient("mistralai/Mistral-Nemo-Instruct-2407")
+
+ def models(query):
+     # Stream a short, concise answer from Gemma via the chat-completion API.
+     messages = [{"role": "user", "content": f"[SYSTEM] You are ASSISTANT who answers questions asked by the user in a short and concise manner. [USER] {query}"}]
+
+     response = ""
+     for message in gemma_client.chat_completion(
+         messages,
+         max_tokens=2048,
+         stream=True,
+     ):
+         token = message.choices[0].delta.content
+         if token:  # the final stream chunk may carry no content
+             response += token
+         yield response
+
+ def nemo(query):
+     budget = 3
+     message = f"""[INST] [SYSTEM] You are a helpful assistant in normal conversation.
+ When given a problem to solve, you are an expert problem-solving assistant.
+ Your task is to provide a detailed, step-by-step solution to a given question.
+ Follow these instructions carefully:
+ 1. Read the given question carefully and reset counter between <count> and </count> to {budget} (maximum 3 steps).
+ 2. Think critically like a human researcher or scientist. Break down the problem using first principles to conceptually understand and answer the question.
+ 3. Generate a detailed, logical step-by-step solution.
+ 4. Enclose each step of your solution within <step> and </step> tags.
+ 5. You are allowed to use at most {budget} steps (starting budget); keep track of it by counting down within tags <count> </count>, and STOP GENERATING MORE STEPS when hitting 0. You don't have to use all of them.
+ 6. Do a self-reflection when you are unsure how to proceed; based on the self-reflection and reward, decide whether you need to return to previous steps.
+ 7. After completing the solution steps, reorganize and synthesize the steps into the final answer within <answer> and </answer> tags.
+ 8. Provide a critical, honest, and subjective self-evaluation of your reasoning process within <reflection> and </reflection> tags.
+ 9. Assign a quality score to your solution as a float between 0.0 (lowest quality) and 1.0 (highest quality), enclosed in <reward> and </reward> tags.
+ Example format:
+ <count> [starting budget] </count>
+ <step> [Content of step 1] </step>
+ <count> [remaining budget] </count>
+ <step> [Content of step 2] </step>
+ <reflection> [Evaluation of the steps so far] </reflection>
+ <reward> [Float between 0.0 and 1.0] </reward>
+ <count> [remaining budget] </count>
+ <step> [Content of step 3 or Content of some previous step] </step>
+ <count> [remaining budget] </count>
+ ...
+ <step> [Content of final step] </step>
+ <count> [remaining budget] </count>
+ <answer> [Final Answer] </answer> (must give final answer in this format)
+ <reflection> [Evaluation of the solution] </reflection>
+ <reward> [Float between 0.0 and 1.0] </reward> [/INST] [INST] [QUERY] {query} [/INST] [ASSISTANT] """
+
+     # Stream raw text generation; with details=True each chunk exposes .token.text.
+     stream = nemo_client.text_generation(message, max_new_tokens=4096, stream=True, details=True, return_full_text=False)
+     output = ""
+
+     for response in stream:
+         output += response.token.text
+         yield output  # yield incrementally so the UI streams instead of waiting for the full answer
+
+ description = "# Chat GO\n### Enter your query, press Enter, and get a lightning-fast response"
+
+ with gr.Blocks() as demo1:
+     gr.Interface(description=description, fn=models, inputs=["text"], outputs="text")
+ with gr.Blocks() as demo2:
+     gr.Interface(description="Slower, but a critical thinker", fn=nemo, inputs=["text"], outputs="text", api_name="critical_thinker", concurrency_limit=10)
+
+ with gr.Blocks() as demo:
+     gr.TabbedInterface([demo1, demo2], ["Fast", "Critical"])
+
+ demo.queue(max_size=300000)
+ demo.launch()
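
Because the Critical tab registers api_name="critical_thinker", that endpoint can also be called programmatically once the Space is running. A minimal sketch using gradio_client, assuming a hypothetical Space ID (substitute the real "<user>/<space>" of this app):

from gradio_client import Client

# Hypothetical Space ID -- replace with the actual one for this app.
client = Client("NithyasriVllB/real-time-chat-with-ai")

# gradio_client addresses endpoints by a leading slash plus the api_name set above;
# predict() on a streaming endpoint returns the final yielded value.
result = client.predict("Why is the sky blue?", api_name="/critical_thinker")
print(result)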