import weaviate
from llama_index import ServiceContext, VectorStoreIndex
from llama_index.llms import LOCALAI_DEFAULTS, OpenAILike
from llama_index.vector_stores import WeaviateVectorStore

# Connect to the Weaviate instance that stores the document embeddings
vector_store = WeaviateVectorStore(
    weaviate_client=weaviate.Client("http://weaviate.default"),  # adjust to your Weaviate URL
    index_name="AIChroma",
)

# LLM served by LocalAI through its OpenAI-compatible API
llm = OpenAILike(temperature=0, model="gpt-3.5-turbo", **LOCALAI_DEFAULTS)

# Use the LocalAI LLM together with a local embedding model
service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")

# Load the index from the existing vector store (no re-ingestion needed)
index = VectorStoreIndex.from_vector_store(
    vector_store, service_context=service_context
)

# Hybrid mode combines vector similarity with keyword (BM25) search
query_engine = index.as_query_engine(
    similarity_top_k=1, vector_store_query_mode="hybrid"
)

# Ask a question: retrieval runs against Weaviate, generation against LocalAI
response = query_engine.query("What is LocalAI?")
print(response)
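
# A minimal sketch (not part of the original snippet): LOCALAI_DEFAULTS points
# the OpenAI-compatible client at a LocalAI endpoint on localhost. If LocalAI
# runs elsewhere, the same settings can be passed explicitly; the URL below is
# an assumed example, adjust it to your deployment.
llm_remote = OpenAILike(
    temperature=0,
    model="gpt-3.5-turbo",
    api_base="http://localai.default:8080/v1",  # assumed in-cluster LocalAI URL
    api_key="sk-dummy",  # LocalAI does not validate API keys
    is_chat_model=True,  # route requests to the chat-completions endpoint
)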