Commit · bfbb419
1 Parent(s): ab3abf2
- chat_page.py +165 -0
- check copy.py +0 -0
- check.py +31 -0
- content.py +60 -0
- content_mod.py +150 -0
- cpvtonplus.md +82 -0
- groundedsegmentanything.md +12 -0
- magicanimate.md +135 -0
- style.css +9 -0
- try.py +73 -0
- ui.py +26 -0
- updated_prompt.txt +37 -0
chat_page.py
ADDED
@@ -0,0 +1,165 @@
```python
import vertexai
import http.client
import typing
import urllib.request
from vertexai.preview.generative_models import GenerativeModel, ChatSession
import streamlit as st
from io import StringIO
import pandas as pd
from streamlit_extras.let_it_rain import rain
import time
import nltk
from textblob import TextBlob

st.set_page_config(page_icon="icon.jpg", page_title="Content Moderation", layout="wide")
st.markdown("<h1 style='color: var(--black-100, var(--black-100, #1C1C1C));text-align: center;font-feature-settings: &quot;cv11&quot; on, &quot;cv01&quot; on, &quot;ss01&quot; on;font-family: Poppins;font-size: 48px;font-style: normal;font-weight: 600;line-height: 58px;'>Content Moderation</h1>",
            unsafe_allow_html=True)
project_id = "agileai-poc"
loc = "us-central1"
vertexai.init(project=project_id, location=loc)  # initialize before loading the model
model = GenerativeModel("gemini-pro")

# Earlier prompt drafts, kept for reference:
# prompt = """understand the content provided, ignore any empty spaces found in the content, and generate the output only in the given format
# format: 1.Tone: find the tone of the content
# 2.Negative sentences: "Only find the negative sentences based on semantic analysis" to ensure accuracy in detecting negative words, pointwise, and "must highlight the negative words in bold" in the same sentence
# if the content is in a positive tone give the output as "No changes required"
# """
# prompt2 = """understand the content provided and generate the output only in the given format
# format: 1.Tone: provide the generated content tone
# 2.Content: "Don't explain the content" just modify the same content by only "Replacing the negative words by converting the tone into formal"; if the content doesn't have any negative words give the output as "No changes required" """
prompt = """find the tone of the content, state whether it is positive or negative, and generate the output only in the given format
format: Tone: just specify whether the tone is positive or negative"""

## Define layout and containers
HEIGHT = 1000

cols = st.columns(2)

with cols[0]:
    left_panel = st.container(height=HEIGHT + 15, border=True)

with cols[1]:
    right_panel = st.container(height=HEIGHT + 15, border=True)

## Add contents
def offensive_text(input):
    # Gemini-based tone check; the prompt asks for "Tone: ..." text,
    # so the exact string comparisons below are best-effort.
    if input:
        response = model.generate_content([prompt, input])
        tone = response.text
        if tone == "negative":
            output = st.write(response.text)
            check = st.write("negative")
        elif tone == "positive":
            with right_panel:
                check = st.write("positive")
        else:
            check = st.write("Your content needs to be modified ..!!")
        return check

chat = model.start_chat(history=[])

# def get_chat_response(chat: ChatSession, prompt: str) -> str:
#     response = chat.send_message(prompt)
#     return response.text


# Function to determine the tone (positive/negative) of the input
def determine_tone(input_text):
    # Perform sentiment analysis using TextBlob
    analysis = TextBlob(input_text)

    # Determine the polarity of the sentiment
    polarity = analysis.sentiment.polarity

    # Check if the sentiment polarity is positive, negative, or neutral
    if polarity > 0:
        return "Positive"
    elif polarity < 0:
        return "Negative"
    else:
        return "Neutral"

if 'chat_history' not in st.session_state:
    st.session_state['chat_history'] = []

with left_panel:
    st.markdown("<h5 style='font-weight:bold;color:blue'>Fans Dashboard</h5>", unsafe_allow_html=True)
with right_panel:
    st.markdown("<h5 style='font-weight:bold;color:blue'>Celeb Dashboard</h5>", unsafe_allow_html=True)

# Example usage
input_text = st.chat_input("Type your comment")
if input_text:
    with left_panel:
        tone = determine_tone(input_text)
        if tone == "Negative":
            st.write(":x:", input_text)
        elif tone == "Positive":
            st.write(":heavy_check_mark:", input_text)
        else:
            st.write("Your content needs to be modified")
    with right_panel:
        tone = determine_tone(input_text)
        if tone == "Positive":
            st.write(":heavy_check_mark:", input_text)

# Earlier chat experiments, kept for reference:
# prompt = st.chat_input("Type your comment")
# if prompt:
#     st.write(get_chat_response(chat, prompt))
#     st.write(offensive_text(prompt))
# prompt = "What are all the colors in a rainbow?"
# print(get_chat_response(chat, prompt))
# prompt = "Why does it appear when it rains?"
# print(get_chat_response(chat, prompt))
# try:
#     with upper_right_panel:
#         st.markdown("<h6 style='color:red;font-weight:bold'>Analyzed Content:</h6>", unsafe_allow_html=True)
#         st.write(response.text)
#     with lower_right_panel:
#         st.markdown("<h6 style='color:green;font-weight:bold'>Rephrased content:</h6>", unsafe_allow_html=True)
#         reference = model.generate_content([prompt2, response.text])
#         st.write(reference.text)
# except:
#     st.error("Check the input text, avoid empty spaces if any.")
#     st.stop()
# if "messages" not in st.session_state:
#     st.session_state.messages = []
#
# for message in st.session_state.messages:
#     with st.chat_message(message["role"]):
#         st.markdown(message["content"])
# queries = st.chat_input("Ask your queries here!")
# if prompt := queries:
#     # Display user message in chat message container
#     with st.chat_message("user"):
#         st.markdown(prompt)
#     # Add user message to chat history
#     st.session_state.messages.append({"role": "user", "content": prompt})
#
#     ask_url = "http://127.0.0.1:5000/predict"
#     question_data = {"questions": prompt}
#     with st.spinner("..."):
#         response = requests.post(
#             url=ask_url,
#             json=question_data,
#         )
#
#     answer = response.json().get("predicted_answer")
#     with st.chat_message("Assistant"):
#         print("Ans : ", answer)
#         st.write(answer)
#     st.session_state.messages.append({"role": "Assistant", "content": answer})
```
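The live routing in this file keys off TextBlob's polarity score rather than the Gemini call. A quick standalone check of that behaviour (a sketch; the printed scores are illustrative, not guaranteed):

```python
from textblob import TextBlob

# Polarity > 0 shows the comment in both panels; < 0 marks it with :x:
# and keeps it off the Celeb Dashboard; exactly 0 asks for a rewrite.
for comment in ["You were brilliant in that film!",
                "That was a lazy, terrible performance."]:
    print(comment, "->", TextBlob(comment).sentiment.polarity)
```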
check copy.py
ADDED
File without changes
check.py
ADDED
@@ -0,0 +1,31 @@
```python
import streamlit as st
import string
import random


def random_string() -> str:
    return "".join(random.choices(string.ascii_uppercase + string.digits, k=10))


def chat_actions():
    st.session_state["chat_history"].append(
        {"role": "user", "content": st.session_state["chat_input"]},
    )

    st.session_state["chat_history"].append(
        {
            "role": "assistant",
            "content": random_string(),
        },  # This can be replaced with your chat response logic
    )


if "chat_history" not in st.session_state:
    st.session_state["chat_history"] = []


st.chat_input("Enter your message", on_submit=chat_actions, key="chat_input")

for i in st.session_state["chat_history"]:
    with st.chat_message(name=i["role"]):
        st.write(i["content"])
```
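The `random_string()` reply is a stand-in, as the inline comment notes. A minimal sketch of swapping in a model call, assuming the same `gemini-pro` setup as chat_page.py (the `chat_response` helper is hypothetical):

```python
import vertexai
from vertexai.preview.generative_models import GenerativeModel

vertexai.init(project="agileai-poc", location="us-central1")
model = GenerativeModel("gemini-pro")

def chat_response(user_text: str) -> str:
    # One-shot reply; could be used in chat_actions() in place of random_string()
    return model.generate_content(user_text).text
```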
content.py
ADDED
@@ -0,0 +1,60 @@
```python
from __future__ import annotations
import langchain
import vertexai
from vertexai.language_models import TextGenerationModel
import streamlit as st
from langchain_community.llms import VertexAI
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatVertexAI
from typing import Any
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.embeddings import VertexAIEmbeddings
import os

os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = "agileai-poc-10f5fe13f8a2.json"
model = TextGenerationModel.from_pretrained("text-bison@001")
# project_id = "agileai-poc"
# loc = "us-central1"
# vertexai.init(project=project_id, location=loc)
# params = VertexAI(
#     model_name="text-bison@001",
#     max_output_tokens=256,
#     temperature=0.2,
#     top_p=0.8
# )
prompt = "modify the text and highlight the points of the given input, stating which type of tone it contains"
# class txt_gen(LLMChain):
#     """LLM Chain specifically for generating multi-paragraph rich text product descriptions using emojis."""
#
#     @classmethod
#     def from_llm(
#         cls, llm: BaseLanguageModel, prompt: str, **kwargs: Any
#     ) -> txt_gen:
#         """Load txt_gen Chain from LLM."""
#         return cls(llm=params, prompt=prompt, **kwargs)
#
#
# def generate_text(input):
#     with open(prompt, "r") as file:
#         prompt_template = file.read()
#
#     PROMPT = PromptTemplate(
#         input_variables=[input], template=prompt_template
#     )
#
#     DescGen_chain = txt_gen.from_llm(llm=params, prompt=PROMPT)
#     DescGen_query = DescGen_chain.apply_and_parse(
#         [{"input": input}]
#     )
#     return DescGen_query[0]["text"]

c1, c2, c3 = st.columns(3)
with c1:
    input = st.text_input("Enter your content :")
    submit = st.button("Submit")
    if submit:
        # description = st.write(generate_text(input))
        # Send the instruction together with the user's text and show the result
        desc = st.write(model.predict(f"{prompt}\n{input}").text)
# with c3:
#     output = st.write(description)
```
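The commented-out `VertexAI(...)` block lists the intended sampling parameters; `TextGenerationModel.predict` accepts them as keyword arguments, so a hedged variant of the submit handler could read:

```python
# Sketch only: the same predict() call with explicit sampling parameters.
response = model.predict(
    f"{prompt}\n{input}",
    max_output_tokens=256,
    temperature=0.2,
    top_p=0.8,
)
st.write(response.text)
```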
content_mod.py
ADDED
@@ -0,0 +1,150 @@
```python
import vertexai
import http.client
import typing
import urllib.request
from vertexai.preview.generative_models import GenerativeModel  # , Part, Image
import streamlit as st
from io import StringIO
import pandas as pd
from streamlit_extras.let_it_rain import rain
import time

st.set_page_config(page_icon="icon.jpg", page_title="Content Moderation", layout="wide")
st.markdown("<h1 style='color: var(--black-100, var(--black-100, #1C1C1C));text-align: center;font-feature-settings: &quot;cv11&quot; on, &quot;cv01&quot; on, &quot;ss01&quot; on;font-family: Poppins;font-size: 48px;font-style: normal;font-weight: 600;line-height: 58px;'>Content Moderation</h1>",
            unsafe_allow_html=True)
project_id = "agileai-poc"
loc = "us-central1"
vertexai.init(project=project_id, location=loc)  # initialize before loading the model
model = GenerativeModel("gemini-pro")
# vision_model = GenerativeModel("gemini-pro-vision")
prompt = """understand the content provided, ignore any empty spaces found in the content, and generate the output only in the given format
format: 1.Tone: find the tone of the content
2.Negative sentences: "Only find the negative sentences based on semantic analysis" to ensure accuracy in detecting negative words, pointwise, and "must highlight the negative words in bold" in the same sentence
if the content is in a positive tone give the output as "No changes required"
"""
prompt2 = """understand the content provided and generate the output only in the given format
format: 1.Tone: provide the generated content tone
2.Content: "Don't explain the content" just modify the same content by only "Replacing the negative words by converting the tone into formal"; if the content doesn't have any negative words give the output as "No changes required" """
# prompt = "updated_prompt.txt"
# Analysis: analyse the input text and highlight points about the tone of the content, list the sentences that are negative/positive, mention the negative/positive/normal words used in each sentence, and highlight them in bold with different font colors so they are easy to differentiate
# Analysis: analyse the input text and highlight points about the tone of the content, list out the negative sentences and highlight the negative words found in those sentences in bold

# Earlier tab-based layout, kept for reference:
# c1, c2, c3 = st.columns((5, 0.5, 7))
# with c1:
#     input = st.text_area("Post your Content", height=500)
#     tab1, tab2 = st.tabs(["Feedback", "Reference"])
#     if st.button("Submit"):
#         response = model.generate_content([prompt, input])
#         with c3:
#             tab1.write(response.text)
#             reference = model.generate_content([prompt2, response.text])
#             tab2.write(reference.text)


## Define layout and containers
HEIGHT = 1000

cols = st.columns(2)

with cols[0]:
    left_panel = st.container(height=HEIGHT + 15, border=True)

with cols[1]:
    upper_right_panel = st.container(height=HEIGHT // 2, border=True)
    lower_right_panel = st.container(height=HEIGHT // 2, border=True)


## Add contents
with left_panel:
    st.markdown("<h5 style='font-weight:bold;color:blue'>Post your Content</h5>", unsafe_allow_html=True)
    input = st.text_area(label="v", label_visibility="collapsed", placeholder="your content", height=900)

    st.toast("Check the input text, avoid empty spaces if any.", icon="🚨")
    time.sleep(10)
    submit = st.button(":blue[Submit]")
    if submit:
        response = model.generate_content([prompt, input])  # returns the analysis

try:
    with upper_right_panel:
        st.markdown("<h6 style='color:red;font-weight:bold'>Analyzed Content:</h6>", unsafe_allow_html=True)
        st.write(response.text)
    with lower_right_panel:
        st.markdown("<h6 style='color:green;font-weight:bold'>Rephrased content:</h6>", unsafe_allow_html=True)
        reference = model.generate_content([prompt2, response.text])
        st.write(reference.text)
except:
    # `response` is undefined until Submit is clicked with valid input
    st.error("Check the input text, avoid empty spaces if any.")
    st.stop()
# rain(emoji="🎈", animation_length="10", falling_speed=10)
# for chunk in response:
#     print(chunk.text)

# Earlier multimodal experiments, kept for reference:
# content_type = st.selectbox("Content Type", ["Text", "Image", "Video"])
# input = st.text_area(":blue[My text here :]", height=500)
# if "Video" in content_type:
#     def generate_text(project_id: str, location: str) -> str:
#         # Initialize Vertex AI
#         # vertexai.init(project=project_id, location=loc)
#         # Load the model
#         # vision_model = GenerativeModel("gemini-pro-vision")
#         # Generate text
#         response = vision_model.generate_content(
#             [
#                 Part.from_uri(
#                     "gs://cloud-samples-data/video/animals.mp4", mime_type="video/mp4"
#                 ),
#                 "What is in the video?",
#             ]
#         )
#         print(response.text)
#         return response.text
#     img_file = st.file_uploader("choose an Image")
# elif "Image" in content_type:
#     # create helper function
#     def load_image_from_url(image_url: str) -> Image:
#         with urllib.request.urlopen(image_url) as response:
#             response = typing.cast(http.client.HTTPResponse, response)
#             image_bytes = response.read()
#         return Image.from_bytes(image_bytes)
#
#     # Load images from Cloud Storage URI
#     # landmark1 = load_image_from_url(
#     #     "https://storage.googleapis.com/cloud-samples-data/vertex-ai/llm/prompts/landmark1.png"
#     # )
#
#     # Pass multimodal prompt
#     # model = GenerativeModel("gemini-pro-vision")
#     response = model.generate_content(
#         [
#             landmark1,
#             "city: Rome, Landmark: the Colosseum",
#             landmark2,
#             "city: Beijing, Landmark: Forbidden City",
#             landmark3,
#         ]
#     )
#     print(response)
# if st.button("Reference "):
#     reference = model.generate_content([prompt2, response])
#     st.write(reference.text)
```
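The bare `try`/`except` above masks the actual failure when `response` is undefined. A hedged alternative that validates the input up front, reusing the file's own error message:

```python
# Sketch: fail fast on empty input instead of catching the model error later.
if submit:
    if not input.strip():
        st.error("Check the input text, avoid empty spaces if any.")
        st.stop()
    response = model.generate_content([prompt, input])
```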
cpvtonplus.md
ADDED
@@ -0,0 +1,82 @@
# CP-VTON+ (CVPRW 2020)
Official implementation for "CP-VTON+: Clothing Shape and Texture Preserving Image-Based Virtual Try-On" from CVPRW 2020.
<br/>Project page: https://minar09.github.io/cpvtonplus/
<br/>Saved/Pre-trained models: [Checkpoints](https://1drv.ms/u/s!Ai8t8GAHdzVUiQA-o3C7cnrfGN6O?e=EaRiFP)
<br/>Dataset: [VITON_PLUS](https://1drv.ms/u/s!Ai8t8GAHdzVUiQQYX0azYhqIDPP6?e=4cpFTI)
<br/>The code and pre-trained models are tested with pytorch 0.4.1, torchvision 0.2.1, opencv-python 4.1 and pillow 5.4 (Python 3 env).
<br/><br/>
[Project page](https://minar09.github.io/cpvtonplus/) | [Paper](https://minar09.github.io/cpvtonplus/cvprw20_cpvtonplus.pdf) | [Dataset](https://1drv.ms/u/s!Ai8t8GAHdzVUiQRFmTPrtrAy0ZP5?e=rS1aK8) | [Model](https://1drv.ms/u/s!Ai8t8GAHdzVUiQA-o3C7cnrfGN6O?e=6PO4gq) | [Video](https://www.youtube.com/watch?v=MPB_PYLOfd8)
<br/><br/>

## Usage
This pipeline is a combination of consecutive training and testing of GMM + TOM. GMM generates the warped clothes according to the target human. Then, TOM blends the warped clothes outputs from GMM into the target human properties to generate the final try-on output.

1) Install the requirements
2) Download/Prepare the dataset
3) Train the GMM network
4) Get warped clothes for the training set with the trained GMM network, and copy the warped clothes & masks into the `data/train` directory
5) Train the TOM network
6) Test GMM on the testing set
7) Get warped clothes for the testing set, and copy the warped clothes & masks into the `data/test` directory
8) Test TOM on the testing set

## Installation
This implementation is built and tested with PyTorch 0.4.1.
PyTorch and torchvision are recommended to be installed with conda: `conda install pytorch=0.4.1 torchvision=0.2.1 -c pytorch`
<br/>For all packages, run `pip install -r requirements.txt`

## Data preparation
For training/testing on the VITON dataset, our full, processed dataset is available here: https://1drv.ms/u/s!Ai8t8GAHdzVUiQRFmTPrtrAy0ZP5?e=rS1aK8. After downloading, unzip it to your data directory.

## Training
Run `python train.py` with your specific usage options for the GMM and TOM stages.
<br/>For example, GMM: ```python train.py --name GMM --stage GMM --workers 4 --save_count 5000 --shuffle```
<br/>Then run test.py for the GMM network with the training dataset, which will generate the warped clothes and masks in the "warp-cloth" and "warp-mask" folders inside the "result/GMM/train/" directory. Copy the "warp-cloth" and "warp-mask" folders into your data directory, for example inside the "data/train" folder.
<br/>Run the TOM stage: ```python train.py --name TOM --stage TOM --workers 4 --save_count 5000 --shuffle```

## Testing
Run `python test.py` with your specific usage options.
<br/>For example, GMM: ```python test.py --name GMM --stage GMM --workers 4 --datamode test --data_list test_pairs.txt --checkpoint checkpoints/GMM/gmm_final.pth```
<br/>Then run test.py for the GMM network with the testing dataset, which will generate the warped clothes and masks in the "warp-cloth" and "warp-mask" folders inside the "result/GMM/test/" directory. Copy the "warp-cloth" and "warp-mask" folders into your data directory, for example inside the "data/test" folder.
<br/>Run the TOM stage: ```python test.py --name TOM --stage TOM --workers 4 --datamode test --data_list test_pairs.txt --checkpoint checkpoints/TOM/tom_final.pth```

## Inference/Demo
Download the pre-trained models from here: https://1drv.ms/u/s!Ai8t8GAHdzVUiQA-o3C7cnrfGN6O?e=EaRiFP.
Then run the same steps as in Testing to test/inference our model.
The code and pre-trained models are tested with pytorch 0.4.1, torchvision 0.2.1, opencv 4.1 and pillow 5.4.

### Testing with custom images
To run the model with custom internet images, make sure you have the following:

1) image (image of a person, cropped/resized to 192 x 256 (width x height) pixels)
2) image-parse (you can generate this with the CIHP_PGN or Graphonomy pretrained networks from the person image. See this [comment](https://github.com/minar09/cp-vton-plus/issues/15#issuecomment-683403388))
3) cloth (in-shop cloth image, cropped/resized to 192 x 256 (width x height) pixels)
4) cloth-mask (binary mask of the cloth image; you can generate it with a simple pillow/opencv function, as in the sketch after this list)
5) pose (pose keypoints of the person, generated with the openpose COCO-18 model (OpenPose from the official repository is preferred))
6) Also, make a test_pairs.txt file for your custom images. Follow the VITON dataset format to keep the same arrangement, or modify the code otherwise.
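For item 4, a minimal sketch of generating the cloth-mask with OpenCV, assuming the in-shop cloth is photographed on a near-white background (the threshold value and file paths are illustrative):

```python
import cv2

cloth = cv2.imread("data/test/cloth/000001_1.jpg")           # hypothetical path
gray = cv2.cvtColor(cloth, cv2.COLOR_BGR2GRAY)
# Pixels darker than the background become the (white) cloth region
_, mask = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY_INV)
mask = cv2.resize(mask, (192, 256))                          # width x height
cv2.imwrite("data/test/cloth-mask/000001_1.jpg", mask)
```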
### What to do in case of unexpected results
Many factors can produce distorted/unexpected results. Please try the following:

1) First try the original VITON dataset and test pair combinations, check the intermediate results and the final output, and confirm they are as expected.
2) If the original VITON results are not as expected, please check the issues raised in this GitHub repo; people have already found several issues, and you can see how they solved them.
3) If the original VITON test results are as expected, then run your custom test sets, check the intermediate results, and debug where it is going wrong.
4) If you are testing with custom images, check the GitHub repository readme and related issues on how to run with custom images.

It is difficult to understand an issue from only a single image/output. As mentioned, there are various factors, so please debug step by step to see where it goes wrong, check all the available intermediate/final inputs/outputs visually, and check multiple cases to see whether the issue happens in all of them. Good luck to you!


## Citation
Please cite our paper in your publications if it helps your research:
```
@InProceedings{Minar_CPP_2020_CVPR_Workshops,
	title={CP-VTON+: Clothing Shape and Texture Preserving Image-Based Virtual Try-On},
	author={Minar, Matiur Rahman and Thai Thanh Tuan and Ahn, Heejune and Rosin, Paul and Lai, Yu-Kun},
	booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},
	month = {June},
	year = {2020}
}
```

### Acknowledgements
This implementation is largely based on the PyTorch implementation of [CP-VTON](https://github.com/sergeywong/cp-vton). We are extremely grateful for their public implementation.
groundedsegmentanything.md
ADDED
@@ -0,0 +1,12 @@
---
title: Grounded Segment Anything
emoji: 📚
colorFrom: purple
colorTo: yellow
sdk: gradio
sdk_version: 3.24.1
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
magicanimate.md
ADDED
@@ -0,0 +1,135 @@
<!-- # magic-edit.github.io -->

<p align="center">

  <h2 align="center">MagicAnimate: Temporally Consistent Human Image Animation using Diffusion Model</h2>
  <p align="center">
    <a href="https://scholar.google.com/citations?user=-4iADzMAAAAJ&hl=en"><strong>Zhongcong Xu</strong></a>
    ·
    <a href="http://jeff95.me/"><strong>Jianfeng Zhang</strong></a>
    ·
    <a href="https://scholar.google.com.sg/citations?user=8gm-CYYAAAAJ&hl=en"><strong>Jun Hao Liew</strong></a>
    ·
    <a href="https://hanshuyan.github.io/"><strong>Hanshu Yan</strong></a>
    ·
    <a href="https://scholar.google.com/citations?user=stQQf7wAAAAJ&hl=en"><strong>Jia-Wei Liu</strong></a>
    ·
    <a href="https://zhangchenxu528.github.io/"><strong>Chenxu Zhang</strong></a>
    ·
    <a href="https://sites.google.com/site/jshfeng/home"><strong>Jiashi Feng</strong></a>
    ·
    <a href="https://sites.google.com/view/showlab"><strong>Mike Zheng Shou</strong></a>
    <br>
    <br>
    <a href="https://arxiv.org/abs/2311.16498"><img src='https://img.shields.io/badge/arXiv-MagicAnimate-red' alt='Paper PDF'></a>
    <a href='https://showlab.github.io/magicanimate'><img src='https://img.shields.io/badge/Project_Page-MagicAnimate-green' alt='Project Page'></a>
    <a href='https://huggingface.co/spaces/zcxu-eric/magicanimate'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
    <br>
    <b>National University of Singapore | ByteDance</b>
  </p>

  <table align="center">
    <tr>
    <td>
      <img src="assets/teaser/t4.gif">
    </td>
    <td>
      <img src="assets/teaser/t2.gif">
    </td>
    </tr>
  </table>

## 📢 News
* **[2023.12.4]** Release inference code and gradio demo. We are working to improve MagicAnimate, stay tuned!
* **[2023.11.23]** Release MagicAnimate paper and project page.

## 🏃‍♂️ Getting Started
Download the pretrained base models for [StableDiffusion V1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5) and [MSE-finetuned VAE](https://huggingface.co/stabilityai/sd-vae-ft-mse).

Download our MagicAnimate [checkpoints](https://huggingface.co/zcxu-eric/MagicAnimate).

Please follow the huggingface download instructions to download the above models and checkpoints; `git lfs` is recommended.
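For example, the three repos can be fetched with `huggingface_hub` instead of manual `git lfs` clones; a hedged sketch (the `local_dir` layout mirrors the tree below):

```python
from huggingface_hub import snapshot_download

# Download each checkpoint repo into the expected pretrained_models layout
for repo_id, local_dir in [
    ("runwayml/stable-diffusion-v1-5", "pretrained_models/stable-diffusion-v1-5"),
    ("stabilityai/sd-vae-ft-mse", "pretrained_models/sd-vae-ft-mse"),
    ("zcxu-eric/MagicAnimate", "pretrained_models/MagicAnimate"),
]:
    snapshot_download(repo_id=repo_id, local_dir=local_dir)
```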
Place the base models and checkpoints as follows:
```bash
magic-animate
|----pretrained_models
  |----MagicAnimate
    |----appearance_encoder
      |----diffusion_pytorch_model.safetensors
      |----config.json
    |----densepose_controlnet
      |----diffusion_pytorch_model.safetensors
      |----config.json
    |----temporal_attention
      |----temporal_attention.ckpt
  |----sd-vae-ft-mse
    |----config.json
    |----diffusion_pytorch_model.safetensors
  |----stable-diffusion-v1-5
    |----scheduler
      |----scheduler_config.json
    |----text_encoder
      |----config.json
      |----pytorch_model.bin
    |----tokenizer (all)
    |----unet
      |----diffusion_pytorch_model.bin
      |----config.json
    |----v1-5-pruned-emaonly.safetensors
|----...
```

## ⚒️ Installation
Prerequisites: `python>=3.8`, `CUDA>=11.3`, and `ffmpeg`.

Install with `conda`:
```bash
conda env create -f environment.yaml
conda activate manimate
```
or `pip`:
```bash
pip3 install -r requirements.txt
```

## 💃 Inference
Run inference on a single GPU:
```bash
bash scripts/animate.sh
```
Run inference with multiple GPUs:
```bash
bash scripts/animate_dist.sh
```

## 🎨 Gradio Demo

#### Online Gradio Demo:
Try our [online gradio demo](https://huggingface.co/spaces/zcxu-eric/magicanimate) quickly.

#### Local Gradio Demo:
Launch the local gradio demo on a single GPU:
```bash
python3 -m demo.gradio_animate
```
Launch the local gradio demo if you have multiple GPUs:
```bash
python3 -m demo.gradio_animate_dist
```
Then open the gradio demo in a local browser.

## 🙏 Acknowledgements
We would like to thank [AK(@_akhaliq)](https://twitter.com/_akhaliq?lang=en) and the huggingface team for their help in setting up the online gradio demo.

## 🎓 Citation
If you find this codebase useful for your research, please use the following entry.
```BibTeX
@inproceedings{xu2023magicanimate,
    author    = {Xu, Zhongcong and Zhang, Jianfeng and Liew, Jun Hao and Yan, Hanshu and Liu, Jia-Wei and Zhang, Chenxu and Feng, Jiashi and Shou, Mike Zheng},
    title     = {MagicAnimate: Temporally Consistent Human Image Animation using Diffusion Model},
    booktitle = {arXiv},
    year      = {2023}
}
```
style.css
ADDED
@@ -0,0 +1,9 @@
```css
div[data-testid="stForm"] {
    position: fixed;
    bottom: 0;
    left: 0;
    right: 0;
    padding: 10px;
    background-color: #f0f0f0; /* Adjust as needed */
    border-top: 1px solid #ddd; /* Adjust as needed */
}
```
try.py
ADDED
@@ -0,0 +1,73 @@
```python
# import streamlit as st

# # Load CSS
# def load_css(file_name):
#     with open(file_name) as f:
#         st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)

# load_css("style.css")

# # Your Streamlit chat input component
# with st.form("my_form"):
#     text_input = st.text_input("Enter your message:")
#     submit_button = st.form_submit_button("Send")

# # Create a container for the chat
# chat_container = st.container()
# with chat_container:
#     with st.form("my_form"):
#         text_input = st.text_input("Enter your message:")
#         submit_button = st.form_submit_button("Send")


import streamlit as st
from streamlit_chat import message

# Initialize session state
if "history" not in st.session_state:
    st.session_state.history = []

# Styling (adjust as needed; note that streamlit_chat's avatar_style expects
# a DiceBear style name, so these CSS classes are not applied to the avatars)
st.markdown("""
<style>
    .sender { background-color: #d1e8f6; align-self: flex-start; }
    .receiver { background-color: #f0f0f0; align-self: flex-end; }
</style>
""", unsafe_allow_html=True)

def generate_response(input_message):
    # Placeholder for now; you'll likely plug in an AI model here
    return f"Echo: {input_message}"

def display_chat():
    for i, chat in enumerate(st.session_state.history):
        if chat['is_user']:
            message(chat['message'], key=str(i) + "_user", is_user=True, avatar_style="sender")
        else:
            message(chat['message'], key=str(i) + "_bot", avatar_style="receiver")

# App Layout
st.title("Social Messenger Demo")
col1, col2 = st.columns(2)

with col1:
    user_input = st.text_input("Enter your message", key="input")
    if st.button("Send", key="send"):
        st.session_state.history.append({"message": user_input, "is_user": True})

with col2:
    if user_input:
        response = generate_response(user_input)
        st.session_state.history.append({"message": response, "is_user": False})

# Display the chat history
display_chat()
```
ui.py
ADDED
@@ -0,0 +1,26 @@
```python
import streamlit as st
import vertexai
from vertexai.preview.generative_models import GenerativeModel, Part

st.set_page_config(page_icon="icon.jpg", page_title="Content Moderation", layout="centered")
# content_type = st.selectbox("Content Type", ["Text", "Image", "Video"])
st.markdown("<h1 style='color: var(--black-100, var(--black-100, #1C1C1C));text-align: center;font-feature-settings: &quot;cv11&quot; on, &quot;cv01&quot; on, &quot;ss01&quot; on;font-family: Poppins;font-size: 48px;font-style: normal;font-weight: 600;line-height: 58px;'>Content Moderation</h1>",
            unsafe_allow_html=True)
c1, c2 = st.columns(2)
with c1:
    content_type = st.selectbox("Content Type", ["Text", "Image", "Video"])
    input = st.text_area(":blue[My text here :]", height=500)
    if "Video" in content_type:
        def generate_text(project_id: str, location: str) -> str:
            # Initialize Vertex AI
            vertexai.init(project=project_id, location=location)
            # Load the model
            vision_model = GenerativeModel("gemini-pro-vision")
            # Generate text
            response = vision_model.generate_content(
                [
                    Part.from_uri(
                        "gs://cloud-samples-data/video/animals.mp4", mime_type="video/mp4"
                    ),
                    "What is in the video?",
                ]
            )
            print(response)
            return response.text
```
updated_prompt.txt
ADDED
@@ -0,0 +1,37 @@
```
Generate a chat where user input is understood, analysed, and declared as to which tone it belongs.
We have two roles: 1. celebrity and 2. fans.
Only fans will send inputs; the celebrity will not respond.
When {role=fans} provides input, if the tone of the content is negative {example: offensive, rude, sarcastic, violating etc.}, the model should block the message.
Else the message should be delivered to the {role=celebrity} page.


import vertexai
from vertexai.preview.generative_models import GenerativeModel

# "gemini-pro" is served through GenerativeModel/generate_content,
# not TextGenerationModel.predict
vertexai.init(project="agileai-poc", location="us-central1")
generation_config = {
    "max_output_tokens": 2048,
    "temperature": 0.9,
    "top_p": 1,
}
model = GenerativeModel("gemini-pro")
response = model.generate_content(
    """Generate a chat where user input is understood and analysed and declared as which tone it belongs to
we have two roles 1. celebrity and 2. fans
only fans will send inputs
celebrity will not respond
when {role=fan} provides input, if the tone of the content is negative {example: offensive, rude, sarcastic, violating etc} the model should block the message.
else the message should be delivered to the {role=celebrity} page

Fans: His presence commands every scene, radiating a magnetic charisma that captivates and holds the audience spellbound.

output: His presence commands every scene, radiating a magnetic charisma that captivates and holds the audience spellbound.


Fans: Salaar makes for an almost satisfying theatrical experience but it leaves a lot of things open-ended. While it does deliver on the promise of a violent action film, it somehow fails to connect much at an emotional level.
output: Blocked
""",
    generation_config=generation_config,
)
print(f"Response from Model: {response.text}")
```
|