cmagganas committed on
Commit
f34acee
·
1 Parent(s): 393e1ec

Rename tools.py to extract_app.py

Browse files
Files changed (2) hide show
  1. extract_app.py +51 -0
  2. tools.py +0 -73
extract_app.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Imports
3
+ import asyncio
4
+ import os
5
+ import openai
6
+
7
+ from typing import List, Optional
8
+ from pydantic import BaseModel, Field
9
+
10
+ from langchain.chains.openai_functions.extraction import create_extraction_chain_pydantic
11
+ from langchain.chat_models import ChatOpenAI
12
+ from langchain.prompts import ChatPromptTemplate
13
+ from langchain.pydantic_v1 import BaseModel
14
+ from langchain.utils.openai_functions import convert_pydantic_to_openai_function
15
+
16
+ from dotenv import load_dotenv
17
+ load_dotenv()
18
+
19
+ openai.api_key = os.environ['OPENAI_API_KEY']
20
+
21
+
22
+ # App
23
+
24
+ # Pydantic is an easy way to define a schema
25
+ class Person(BaseModel):
26
+ """Information about people to extract."""
27
+
28
+ name: str
29
+ age: Optional[int] = None
30
+
31
+ # Main function to extract information
32
+ def extract_information():
33
+ # Make sure to use a recent model that supports tools
34
+ llm = ChatOpenAI(model="gpt-3.5-turbo-1106")
35
+
36
+ return create_extraction_chain_pydantic(Person, llm)
37
+
38
+
39
+ if __name__ == "__main__":
40
+ text = "My name is John and I am 20 years old. My name is sally and I am 30 years old."
41
+ chain = extract_information()
42
+ print(chain.invoke({"input": text})["text"])
43
+
44
+ async def extract_information_async(message: str):
45
+ return chain.invoke({"input": message})["text"]
46
+
47
+ async def main():
48
+ res = await extract_information_async(text)
49
+ print(res)
50
+
51
+ asyncio.run(main())
tools.py DELETED
@@ -1,73 +0,0 @@
1
- import io
2
- import os
3
- from openai import OpenAI
4
- from langchain.tools import StructuredTool, Tool
5
- from io import BytesIO
6
- import requests
7
- import json
8
- from io import BytesIO
9
-
10
- import chainlit as cl
11
-
12
-
13
- def get_image_name():
14
- """
15
- We need to keep track of images we generate, so we can reference them later
16
- and display them correctly to our users.
17
- """
18
- image_count = cl.user_session.get("image_count")
19
- if image_count is None:
20
- image_count = 0
21
- else:
22
- image_count += 1
23
-
24
- cl.user_session.set("image_count", image_count)
25
-
26
- return f"image-{image_count}"
27
-
28
-
29
- def _generate_image(prompt: str):
30
- """
31
- This function is used to generate an image from a text prompt using
32
- DALL-E 3.
33
-
34
- We use the OpenAI API to generate the image, and then store it in our
35
- user session so we can reference it later.
36
- """
37
- client = OpenAI()
38
-
39
- response = client.images.generate(
40
- model="dall-e-3",
41
- prompt=prompt,
42
- size="1024x1024",
43
- quality="standard",
44
- n=1,
45
- )
46
-
47
- image_payload = requests.get(response.data[0].url, stream=True)
48
-
49
- image_bytes = BytesIO(image_payload.content)
50
-
51
- print(type(image_bytes))
52
-
53
- name = get_image_name()
54
- cl.user_session.set(name, image_bytes.getvalue())
55
- cl.user_session.set("generated_image", name)
56
- return name
57
-
58
-
59
- def generate_image(prompt: str):
60
- image_name = _generate_image(prompt)
61
- return f"Here is {image_name}."
62
-
63
-
64
- # this is our tool - which is what allows our agent to generate images in the first place!
65
- # the `description` field is of utmost imporance as it is what the LLM "brain" uses to determine
66
- # which tool to use for a given input.
67
- generate_image_format = '{{"prompt": "prompt"}}'
68
- generate_image_tool = Tool.from_function(
69
- func=generate_image,
70
- name="GenerateImage",
71
- description=f"Useful to create an image from a text prompt. Input should be a single string strictly in the following JSON format: {generate_image_format}",
72
- return_direct=True,
73
- )