method_name (string, 1-78 chars) | method_body (string, 3-9.66k chars) | full_code (string, 31-10.7k chars) | docstring (string, 4-4.74k chars, nullable ⌀) |
---|---|---|---|
test_send | """Test gmail send."""
mock_api_resource = MagicMock()
tool = GmailSendMessage.construct(api_resource=mock_api_resource)
tool_input = {'to': '[email protected]', 'subject': 'subject line',
'message': 'message body'}
result = tool.run(tool_input)
assert result.startswith('Message sent. Message Id:')
assert tool.args_schema is not None | def test_send() ->None:
"""Test gmail send."""
mock_api_resource = MagicMock()
tool = GmailSendMessage.construct(api_resource=mock_api_resource)
tool_input = {'to': '[email protected]', 'subject': 'subject line',
'message': 'message body'}
result = tool.run(tool_input)
assert result.startswith('Message sent. Message Id:')
assert tool.args_schema is not None | Test gmail send. |
_call | """Generate gremlin statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
generated_gremlin = self.gremlin_generation_chain.run({'question': question,
'schema': self.graph.get_schema}, callbacks=callbacks)
_run_manager.on_text('Generated gremlin:', end='\n', verbose=self.verbose)
_run_manager.on_text(generated_gremlin, color='green', end='\n', verbose=
self.verbose)
context = self.graph.query(generated_gremlin)
_run_manager.on_text('Full Context:', end='\n', verbose=self.verbose)
_run_manager.on_text(str(context), color='green', end='\n', verbose=self.
verbose)
result = self.qa_chain({'question': question, 'context': context},
callbacks=callbacks)
return {self.output_key: result[self.qa_chain.output_key]} | def _call(self, inputs: Dict[str, Any], run_manager: Optional[
CallbackManagerForChainRun]=None) ->Dict[str, str]:
"""Generate gremlin statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
generated_gremlin = self.gremlin_generation_chain.run({'question':
question, 'schema': self.graph.get_schema}, callbacks=callbacks)
_run_manager.on_text('Generated gremlin:', end='\n', verbose=self.verbose)
_run_manager.on_text(generated_gremlin, color='green', end='\n',
verbose=self.verbose)
context = self.graph.query(generated_gremlin)
_run_manager.on_text('Full Context:', end='\n', verbose=self.verbose)
_run_manager.on_text(str(context), color='green', end='\n', verbose=
self.verbose)
result = self.qa_chain({'question': question, 'context': context},
callbacks=callbacks)
return {self.output_key: result[self.qa_chain.output_key]} | Generate gremlin statement, use it to look up in db and answer question. |
validate_environment | """Validate authenticate data in environment and python package is installed."""
try:
import gigachat
except ImportError:
raise ImportError(
'Could not import gigachat python package. Please install it with `pip install gigachat`.'
)
return values | @root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate authenticate data in environment and python package is installed."""
try:
import gigachat
except ImportError:
raise ImportError(
'Could not import gigachat python package. Please install it with `pip install gigachat`.'
)
    return values | Validate authentication data in the environment and that the python package is installed. |
list_to_objects | """Helper to convert dict objects."""
return [namedtuple('Object', d.keys())(**d) for d in dict_list if
isinstance(d, dict)] | def list_to_objects(dict_list: list) ->list:
"""Helper to convert dict objects."""
return [namedtuple('Object', d.keys())(**d) for d in dict_list if
    isinstance(d, dict)] | Helper to convert a list of dicts into namedtuple objects. |
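A self-contained usage sketch of `list_to_objects` (the function body is copied from the row above so the snippet runs standalone; the sample data is made up for illustration):

```python
from collections import namedtuple


def list_to_objects(dict_list: list) -> list:
    """Convert a list of dicts into ad-hoc namedtuple objects; non-dict entries are skipped."""
    return [namedtuple('Object', d.keys())(**d) for d in dict_list
            if isinstance(d, dict)]


rows = [{'name': 'alice', 'age': 30}, 'not-a-dict', {'name': 'bob', 'age': 25}]
objects = list_to_objects(rows)
print(objects[0].name, objects[0].age)  # alice 30
print(len(objects))                     # 2 -- the string entry was skipped
```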
test_chroma_with_metadatas_with_scores_using_vector | """Test end to end construction and scored search, using embedding vector."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
embeddings = FakeEmbeddings()
docsearch = Chroma.from_texts(collection_name='test_collection', texts=
texts, embedding=embeddings, metadatas=metadatas)
embedded_query = embeddings.embed_query('foo')
output = docsearch.similarity_search_by_vector_with_relevance_scores(embedding
=embedded_query, k=1)
assert output == [(Document(page_content='foo', metadata={'page': '0'}), 0.0)] | def test_chroma_with_metadatas_with_scores_using_vector() ->None:
"""Test end to end construction and scored search, using embedding vector."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
embeddings = FakeEmbeddings()
docsearch = Chroma.from_texts(collection_name='test_collection', texts=
texts, embedding=embeddings, metadatas=metadatas)
embedded_query = embeddings.embed_query('foo')
output = docsearch.similarity_search_by_vector_with_relevance_scores(
embedding=embedded_query, k=1)
assert output == [(Document(page_content='foo', metadata={'page': '0'}),
0.0)] | Test end to end construction and scored search, using embedding vector. |
_similarity_search_with_score | """Returns a list of documents with their scores
Args:
embeddings: The query vector
k: the number of documents to return
Returns:
A list of documents closest to the query vector
"""
pipeline: List[dict[str, Any]] = [{'$search': {'cosmosSearch': {'vector':
embeddings, 'path': self._embedding_key, 'k': k}, 'returnStoredSource':
True}}, {'$project': {'similarityScore': {'$meta': 'searchScore'},
'document': '$$ROOT'}}]
cursor = self._collection.aggregate(pipeline)
docs = []
for res in cursor:
score = res.pop('similarityScore')
document_object_field = res.pop('document')
text = document_object_field.pop(self._text_key)
docs.append((Document(page_content=text, metadata=document_object_field
), score))
return docs | def _similarity_search_with_score(self, embeddings: List[float], k: int=4
) ->List[Tuple[Document, float]]:
"""Returns a list of documents with their scores
Args:
embeddings: The query vector
k: the number of documents to return
Returns:
A list of documents closest to the query vector
"""
pipeline: List[dict[str, Any]] = [{'$search': {'cosmosSearch': {
'vector': embeddings, 'path': self._embedding_key, 'k': k},
'returnStoredSource': True}}, {'$project': {'similarityScore': {
'$meta': 'searchScore'}, 'document': '$$ROOT'}}]
cursor = self._collection.aggregate(pipeline)
docs = []
for res in cursor:
score = res.pop('similarityScore')
document_object_field = res.pop('document')
text = document_object_field.pop(self._text_key)
docs.append((Document(page_content=text, metadata=
document_object_field), score))
return docs | Returns a list of documents with their scores
Args:
embeddings: The query vector
k: the number of documents to return
Returns:
A list of documents closest to the query vector |
validate_environment | """Validate that the python package exists in environment."""
try:
import tensorflow
except ImportError:
raise ImportError(
'Could not import tensorflow python package. Please install it with `pip install tensorflow`.'
)
try:
import tensorflow_datasets
except ImportError:
raise ImportError(
'Could not import tensorflow_datasets python package. Please install it with `pip install tensorflow-datasets`.'
)
if values['sample_to_document_function'] is None:
raise ValueError(
'sample_to_document_function is None. Please provide a function that converts a dataset sample to a Document.'
)
values['dataset'] = tensorflow_datasets.load(values['dataset_name'], split=
values['split_name'])
return values | @root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that the python package exists in environment."""
try:
import tensorflow
except ImportError:
raise ImportError(
'Could not import tensorflow python package. Please install it with `pip install tensorflow`.'
)
try:
import tensorflow_datasets
except ImportError:
raise ImportError(
'Could not import tensorflow_datasets python package. Please install it with `pip install tensorflow-datasets`.'
)
if values['sample_to_document_function'] is None:
raise ValueError(
'sample_to_document_function is None. Please provide a function that converts a dataset sample to a Document.'
)
values['dataset'] = tensorflow_datasets.load(values['dataset_name'],
split=values['split_name'])
return values | Validate that the python package exists in environment. |
test_edenai_call | """Test simple call to edenai."""
llm = EdenAI(provider='openai', temperature=0.2, max_tokens=250)
output = llm('Say foo:')
assert llm._llm_type == 'edenai'
assert llm.feature == 'text'
assert llm.subfeature == 'generation'
assert isinstance(output, str) | def test_edenai_call() ->None:
"""Test simple call to edenai."""
llm = EdenAI(provider='openai', temperature=0.2, max_tokens=250)
output = llm('Say foo:')
assert llm._llm_type == 'edenai'
assert llm.feature == 'text'
assert llm.subfeature == 'generation'
assert isinstance(output, str) | Test simple call to edenai. |
test_labeled_pairwise_string_comparison_chain_missing_ref | llm = FakeLLM(queries={'a': 'This is a rather good answer. Rating: [[9]]'},
sequential_responses=True)
chain = LabeledScoreStringEvalChain.from_llm(llm=llm)
with pytest.raises(ValueError):
chain.evaluate_strings(prediction='I like pie.', input=
'What is your favorite food?') | def test_labeled_pairwise_string_comparison_chain_missing_ref() ->None:
llm = FakeLLM(queries={'a':
'This is a rather good answer. Rating: [[9]]'},
sequential_responses=True)
chain = LabeledScoreStringEvalChain.from_llm(llm=llm)
with pytest.raises(ValueError):
chain.evaluate_strings(prediction='I like pie.', input=
'What is your favorite food?') | null |
test_vertexai_stream | model = ChatVertexAI(temperature=0, model_name=model_name)
message = HumanMessage(content='Hello')
sync_response = model.stream([message])
for chunk in sync_response:
assert isinstance(chunk, AIMessageChunk) | @pytest.mark.scheduled
@pytest.mark.parametrize('model_name', ['chat-bison@001', 'gemini-pro'])
def test_vertexai_stream(model_name: str) ->None:
model = ChatVertexAI(temperature=0, model_name=model_name)
message = HumanMessage(content='Hello')
sync_response = model.stream([message])
for chunk in sync_response:
assert isinstance(chunk, AIMessageChunk) | null |
test_api_key_is_secret_string | llm = Anyscale(anyscale_api_key='secret-api-key', anyscale_api_base='test',
model_name='test')
assert isinstance(llm.anyscale_api_key, SecretStr) | @pytest.mark.requires('openai')
def test_api_key_is_secret_string() ->None:
llm = Anyscale(anyscale_api_key='secret-api-key', anyscale_api_base=
'test', model_name='test')
assert isinstance(llm.anyscale_api_key, SecretStr) | null |
from_examples | """Create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
Args:
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select
input_keys: If provided, the search is based on the input variables
instead of all variables.
vectorstore_cls_kwargs: optional kwargs containing url for vector store
Returns:
The ExampleSelector instantiated, backed by a vector store.
"""
if input_keys:
string_examples = [' '.join(sorted_values({k: eg[k] for k in input_keys
})) for eg in examples]
else:
string_examples = [' '.join(sorted_values(eg)) for eg in examples]
vectorstore = vectorstore_cls.from_texts(string_examples, embeddings,
metadatas=examples, **vectorstore_cls_kwargs)
return cls(vectorstore=vectorstore, k=k, fetch_k=fetch_k, input_keys=input_keys
) | @classmethod
def from_examples(cls, examples: List[dict], embeddings: Embeddings,
vectorstore_cls: Type[VectorStore], k: int=4, input_keys: Optional[List
[str]]=None, fetch_k: int=20, **vectorstore_cls_kwargs: Any
) ->MaxMarginalRelevanceExampleSelector:
"""Create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
Args:
examples: List of examples to use in the prompt.
        embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select
input_keys: If provided, the search is based on the input variables
instead of all variables.
vectorstore_cls_kwargs: optional kwargs containing url for vector store
Returns:
The ExampleSelector instantiated, backed by a vector store.
"""
if input_keys:
string_examples = [' '.join(sorted_values({k: eg[k] for k in
input_keys})) for eg in examples]
else:
string_examples = [' '.join(sorted_values(eg)) for eg in examples]
vectorstore = vectorstore_cls.from_texts(string_examples, embeddings,
metadatas=examples, **vectorstore_cls_kwargs)
return cls(vectorstore=vectorstore, k=k, fetch_k=fetch_k, input_keys=
input_keys) | Create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
Args:
examples: List of examples to use in the prompt.
        embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select
input_keys: If provided, the search is based on the input variables
instead of all variables.
vectorstore_cls_kwargs: optional kwargs containing url for vector store
Returns:
The ExampleSelector instantiated, backed by a vector store. |
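A hedged usage sketch of the `from_examples` classmethod shown above. The import paths and the `FAISS`/`OpenAIEmbeddings` choices are assumptions, not taken from this row; any `VectorStore` subclass with a `from_texts` constructor and any `Embeddings` implementation should work:

```python
# Assumed imports -- adjust to the langchain packages installed in your project.
from langchain.prompts.example_selector import MaxMarginalRelevanceExampleSelector
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

examples = [
    {'input': 'happy', 'output': 'sad'},
    {'input': 'tall', 'output': 'short'},
]

# Indexes the examples in FAISS and later re-selects k of them by
# maximal marginal relevance against the incoming prompt variables.
selector = MaxMarginalRelevanceExampleSelector.from_examples(
    examples,
    OpenAIEmbeddings(),      # requires OPENAI_API_KEY to embed the examples
    FAISS,
    k=1,
    input_keys=['input'],    # search on the input variable only
)
```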
test_lazy_load_success | """Test that returns the correct answer"""
output = list(tfds_client.lazy_load())
assert isinstance(output, list)
assert len(output) == MAX_DOCS
assert isinstance(output[0], Document)
assert len(output[0].page_content) > 0
assert isinstance(output[0].page_content, str)
assert isinstance(output[0].metadata, dict) | def test_lazy_load_success(tfds_client: TensorflowDatasetLoader) ->None:
"""Test that returns the correct answer"""
output = list(tfds_client.lazy_load())
assert isinstance(output, list)
assert len(output) == MAX_DOCS
assert isinstance(output[0], Document)
assert len(output[0].page_content) > 0
assert isinstance(output[0].page_content, str)
    assert isinstance(output[0].metadata, dict) | Test that lazy_load returns the expected documents. |
_embedding_vector_column_dimension | """Return the dimension of the embedding vector column.
None if the index is not a self-managed embedding index.
"""
return self._embedding_vector_column().get('embedding_dimension') | def _embedding_vector_column_dimension(self) ->Optional[int]:
"""Return the dimension of the embedding vector column.
None if the index is not a self-managed embedding index.
"""
return self._embedding_vector_column().get('embedding_dimension') | Return the dimension of the embedding vector column.
None if the index is not a self-managed embedding index. |
test_visit_comparison_range_match | comp = Comparison(comparator=Comparator.CONTAIN, attribute='foo', value='1')
expected = {'match': {'metadata.foo': {'query': '1'}}}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual | def test_visit_comparison_range_match() ->None:
comp = Comparison(comparator=Comparator.CONTAIN, attribute='foo', value='1'
)
expected = {'match': {'metadata.foo': {'query': '1'}}}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual | null |
test_analyticdb | """Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
docsearch = AnalyticDB.from_texts(texts=texts, collection_name=
'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING, pre_delete_collection=True)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')] | def test_analyticdb() ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
docsearch = AnalyticDB.from_texts(texts=texts, collection_name=
'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING, pre_delete_collection=True)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')] | Test end to end construction and search. |
similarity_search_limit_score | """
Returns the most similar indexed documents to the query text within the
score_threshold range.
Deprecated: Use similarity_search with distance_threshold instead.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
score_threshold (float): The minimum matching *distance* required
for a document to be considered a match. Defaults to 0.2.
Returns:
List[Document]: A list of documents that are most similar to the query text
including the match score for each document.
Note:
If there are no documents that satisfy the score_threshold value,
an empty list is returned.
"""
return self.similarity_search(query, k=k, distance_threshold=
score_threshold, **kwargs) | @deprecated('0.0.272', alternative='similarity_search(distance_threshold=0.1)')
def similarity_search_limit_score(self, query: str, k: int=4,
score_threshold: float=0.2, **kwargs: Any) ->List[Document]:
"""
Returns the most similar indexed documents to the query text within the
score_threshold range.
Deprecated: Use similarity_search with distance_threshold instead.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
score_threshold (float): The minimum matching *distance* required
for a document to be considered a match. Defaults to 0.2.
Returns:
List[Document]: A list of documents that are most similar to the query text
including the match score for each document.
Note:
If there are no documents that satisfy the score_threshold value,
an empty list is returned.
"""
return self.similarity_search(query, k=k, distance_threshold=
score_threshold, **kwargs) | Returns the most similar indexed documents to the query text within the
score_threshold range.
Deprecated: Use similarity_search with distance_threshold instead.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
score_threshold (float): The minimum matching *distance* required
for a document to be considered a match. Defaults to 0.2.
Returns:
List[Document]: A list of documents that are most similar to the query text
including the match score for each document.
Note:
If there are no documents that satisfy the score_threshold value,
an empty list is returned. |
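The `@deprecated` decorator above points callers at `similarity_search` with a `distance_threshold` keyword. A comment-only migration sketch; `rds` stands in for a hypothetical Redis vector-store instance:

```python
# Before (deprecated since 0.0.272):
#     docs = rds.similarity_search_limit_score('my query', k=4, score_threshold=0.2)
# After (replacement suggested by the decorator):
#     docs = rds.similarity_search('my query', k=4, distance_threshold=0.2)
```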
_stream | message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, 'stream': True}
default_chunk_class = AIMessageChunk
for chunk in self.completion_with_retry(messages=message_dicts, run_manager
=run_manager, **params):
if len(chunk['choices']) == 0:
continue
choice = chunk['choices'][0]
chunk = _convert_delta_to_message_chunk(choice['delta'],
default_chunk_class)
finish_reason = choice.get('finish_reason')
generation_info = dict(finish_reason=finish_reason
) if finish_reason is not None else None
default_chunk_class = chunk.__class__
chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk) | def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, 'stream': True}
default_chunk_class = AIMessageChunk
for chunk in self.completion_with_retry(messages=message_dicts,
run_manager=run_manager, **params):
if len(chunk['choices']) == 0:
continue
choice = chunk['choices'][0]
chunk = _convert_delta_to_message_chunk(choice['delta'],
default_chunk_class)
finish_reason = choice.get('finish_reason')
generation_info = dict(finish_reason=finish_reason
) if finish_reason is not None else None
default_chunk_class = chunk.__class__
chunk = ChatGenerationChunk(message=chunk, generation_info=
generation_info)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk) | null |
json_equality_evaluator | return JsonEqualityEvaluator() | @pytest.fixture
def json_equality_evaluator() ->JsonEqualityEvaluator:
return JsonEqualityEvaluator() | null |
validate_environment | """Validate that api key and python package exists in environment."""
mosaicml_api_token = get_from_dict_or_env(values, 'mosaicml_api_token',
'MOSAICML_API_TOKEN')
values['mosaicml_api_token'] = mosaicml_api_token
return values | @root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
mosaicml_api_token = get_from_dict_or_env(values, 'mosaicml_api_token',
'MOSAICML_API_TOKEN')
values['mosaicml_api_token'] = mosaicml_api_token
    return values | Validate that api key and python package exist in environment. |
test_runnable_context_seq_key_circular_ref | seq: Runnable = {'bar': Context.setter(input=Context.getter('input'))
} | Context.getter('foo')
with pytest.raises(ValueError):
seq.invoke('foo') | def test_runnable_context_seq_key_circular_ref() ->None:
seq: Runnable = {'bar': Context.setter(input=Context.getter('input'))
} | Context.getter('foo')
with pytest.raises(ValueError):
seq.invoke('foo') | null |
test_loading_with_template_as_file | """Test loading when the template is a file."""
with change_directory(EXAMPLE_DIR):
prompt = load_prompt('simple_prompt_with_template_file.json')
expected_prompt = PromptTemplate(input_variables=['adjective',
'content'], template='Tell me a {adjective} joke about {content}.')
assert prompt == expected_prompt | def test_loading_with_template_as_file() ->None:
"""Test loading when the template is a file."""
with change_directory(EXAMPLE_DIR):
prompt = load_prompt('simple_prompt_with_template_file.json')
expected_prompt = PromptTemplate(input_variables=['adjective',
'content'], template='Tell me a {adjective} joke about {content}.')
assert prompt == expected_prompt | Test loading when the template is a file. |
__init__ | """Initialize the PromptLayerCallbackHandler."""
_lazy_import_promptlayer()
self.pl_id_callback = pl_id_callback
self.pl_tags = pl_tags or []
self.runs: Dict[UUID, Dict[str, Any]] = {} | def __init__(self, pl_id_callback: Optional[Callable[..., Any]]=None,
pl_tags: Optional[List[str]]=None) ->None:
"""Initialize the PromptLayerCallbackHandler."""
_lazy_import_promptlayer()
self.pl_id_callback = pl_id_callback
self.pl_tags = pl_tags or []
self.runs: Dict[UUID, Dict[str, Any]] = {} | Initialize the PromptLayerCallbackHandler. |
_kwargs_retrying | kwargs: Dict[str, Any] = dict()
if self.max_attempt_number:
kwargs['stop'] = stop_after_attempt(self.max_attempt_number)
if self.wait_exponential_jitter:
kwargs['wait'] = wait_exponential_jitter()
if self.retry_exception_types:
kwargs['retry'] = retry_if_exception_type(self.retry_exception_types)
return kwargs | @property
def _kwargs_retrying(self) ->Dict[str, Any]:
kwargs: Dict[str, Any] = dict()
if self.max_attempt_number:
kwargs['stop'] = stop_after_attempt(self.max_attempt_number)
if self.wait_exponential_jitter:
kwargs['wait'] = wait_exponential_jitter()
if self.retry_exception_types:
kwargs['retry'] = retry_if_exception_type(self.retry_exception_types)
return kwargs | null |
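A minimal sketch of how the kwargs assembled by `_kwargs_retrying` can drive tenacity's `Retrying` loop (the retried operation and the exception type are placeholders):

```python
from tenacity import (Retrying, retry_if_exception_type, stop_after_attempt,
                      wait_exponential_jitter)

kwargs = {
    'stop': stop_after_attempt(3),                    # from max_attempt_number
    'wait': wait_exponential_jitter(),                # from wait_exponential_jitter
    'retry': retry_if_exception_type((ValueError,)),  # from retry_exception_types
}

attempts = 0
for attempt in Retrying(**kwargs):
    with attempt:
        attempts += 1
        if attempts < 3:
            raise ValueError('transient failure')  # retried twice, then succeeds
print(attempts)  # 3
```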
strip_markdown_code | """Strip markdown code from a string."""
stripped_string = re.sub('^`{1,3}.*?\\n', '', md_string, flags=re.DOTALL)
stripped_string = re.sub('`{1,3}$', '', stripped_string)
return stripped_string | def strip_markdown_code(md_string: str) ->str:
"""Strip markdown code from a string."""
stripped_string = re.sub('^`{1,3}.*?\\n', '', md_string, flags=re.DOTALL)
stripped_string = re.sub('`{1,3}$', '', stripped_string)
return stripped_string | Strip markdown code from a string. |
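A self-contained check of `strip_markdown_code` on a fenced snippet (the function body is copied from the row above; stdlib only):

```python
import re


def strip_markdown_code(md_string: str) -> str:
    """Strip markdown code fences from a string."""
    stripped_string = re.sub('^`{1,3}.*?\\n', '', md_string, flags=re.DOTALL)
    stripped_string = re.sub('`{1,3}$', '', stripped_string)
    return stripped_string


print(strip_markdown_code("```python\nprint('hi')\n```"))  # -> print('hi')
```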
__is_non_html_available | _unstructured_version = self.__version.split('-')[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split('.')]
)
return unstructured_version >= (0, 5, 12) | def __is_non_html_available(self) ->bool:
_unstructured_version = self.__version.split('-')[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.
split('.')])
return unstructured_version >= (0, 5, 12) | null |
_import_google_places_api | from langchain_community.utilities.google_places_api import GooglePlacesAPIWrapper
return GooglePlacesAPIWrapper | def _import_google_places_api() ->Any:
from langchain_community.utilities.google_places_api import GooglePlacesAPIWrapper
return GooglePlacesAPIWrapper | null |
_convert_message_to_dict | if isinstance(message, HumanMessage):
message_dict = {'role': 'user', 'content': message.content}
elif isinstance(message, AIMessage):
message_dict = {'role': 'assistant', 'content': message.content}
elif isinstance(message, SystemMessage):
message_dict = {'role': 'system', 'content': message.content}
elif isinstance(message, ChatMessage):
message_dict = {'role': message.role, 'content': message.content}
else:
raise ValueError(f'Got unknown type {message}')
if 'name' in message.additional_kwargs:
message_dict['name'] = message.additional_kwargs['name']
return message_dict | def _convert_message_to_dict(self, message: BaseMessage) ->Dict[str, Any]:
if isinstance(message, HumanMessage):
message_dict = {'role': 'user', 'content': message.content}
elif isinstance(message, AIMessage):
message_dict = {'role': 'assistant', 'content': message.content}
elif isinstance(message, SystemMessage):
message_dict = {'role': 'system', 'content': message.content}
elif isinstance(message, ChatMessage):
message_dict = {'role': message.role, 'content': message.content}
else:
raise ValueError(f'Got unknown type {message}')
if 'name' in message.additional_kwargs:
message_dict['name'] = message.additional_kwargs['name']
return message_dict | null |
build_extra | """Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get('model_kwargs', {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f'Found {field_name} supplied twice.')
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values['model_kwargs'] = extra
return values | @root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) ->Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.
values()}
extra = values.get('model_kwargs', {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f'Found {field_name} supplied twice.')
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values['model_kwargs'] = extra
return values | Build extra kwargs from additional params that were passed in. |
test_get_erc20_transaction | account_address = '0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b'
loader = EtherscanLoader(account_address, filter='erc20_transaction')
result = loader.load()
assert len(result) > 0, 'No transactions returned' | @pytest.mark.skipif(not etherscan_key_set, reason=
'Etherscan API key not provided.')
def test_get_erc20_transaction() ->None:
account_address = '0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b'
loader = EtherscanLoader(account_address, filter='erc20_transaction')
result = loader.load()
assert len(result) > 0, 'No transactions returned' | null |
finalize | """Finalize the property."""
return _deprecated_property(fget=obj.fget, fset=obj.fset, fdel=obj.fdel,
doc=new_doc) | def finalize(_: Any, new_doc: str) ->Any:
"""Finalize the property."""
return _deprecated_property(fget=obj.fget, fset=obj.fset, fdel=obj.fdel,
doc=new_doc) | Finalize the property. |
test_fireworks_batch | """Tests completion with invoke"""
llm = Fireworks()
output = llm.batch(['How is the weather in New York today?',
'How is the weather in New York today?'], stop=[','])
for token in output:
assert isinstance(token, str)
assert token[-1] == ',' | @pytest.mark.scheduled
def test_fireworks_batch(llm: Fireworks) ->None:
"""Tests completion with invoke"""
llm = Fireworks()
output = llm.batch(['How is the weather in New York today?',
'How is the weather in New York today?'], stop=[','])
for token in output:
assert isinstance(token, str)
    assert token[-1] == ',' | Tests completion with batch. |
delete | """Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
timestamp: Optional timestamp to delete with.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
external_ids = np.array(ids).astype(np.uint64)
self.vector_index.delete_batch(external_ids=external_ids, timestamp=
timestamp if timestamp != 0 else None)
return True | def delete(self, ids: Optional[List[str]]=None, timestamp: int=0, **kwargs: Any
) ->Optional[bool]:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
timestamp: Optional timestamp to delete with.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
external_ids = np.array(ids).astype(np.uint64)
self.vector_index.delete_batch(external_ids=external_ids, timestamp=
timestamp if timestamp != 0 else None)
return True | Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
timestamp: Optional timestamp to delete with.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented. |
test_saving_loading_llm | """Test saving/loading an promptlayer OpenAPI LLM."""
llm = PromptLayerOpenAI(max_tokens=10)
llm.save(file_path=tmp_path / 'openai.yaml')
loaded_llm = load_llm(tmp_path / 'openai.yaml')
assert loaded_llm == llm | def test_saving_loading_llm(tmp_path: Path) ->None:
"""Test saving/loading an promptlayer OpenAPI LLM."""
llm = PromptLayerOpenAI(max_tokens=10)
llm.save(file_path=tmp_path / 'openai.yaml')
loaded_llm = load_llm(tmp_path / 'openai.yaml')
    assert loaded_llm == llm | Test saving/loading a PromptLayer OpenAI LLM. |
validate_environment | """Validate that api key and python package exists in environment."""
deepinfra_api_token = get_from_dict_or_env(values, 'deepinfra_api_token',
'DEEPINFRA_API_TOKEN')
values['deepinfra_api_token'] = deepinfra_api_token
return values | @root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
deepinfra_api_token = get_from_dict_or_env(values,
'deepinfra_api_token', 'DEEPINFRA_API_TOKEN')
values['deepinfra_api_token'] = deepinfra_api_token
    return values | Validate that api key and python package exist in environment. |
validate_jinja2 | """
Validate that the input variables are valid for the template.
Issues a warning if missing or extra variables are found.
Args:
template: The template string.
input_variables: The input variables.
"""
input_variables_set = set(input_variables)
valid_variables = _get_jinja2_variables_from_template(template)
missing_variables = valid_variables - input_variables_set
extra_variables = input_variables_set - valid_variables
warning_message = ''
if missing_variables:
warning_message += f'Missing variables: {missing_variables} '
if extra_variables:
warning_message += f'Extra variables: {extra_variables}'
if warning_message:
warnings.warn(warning_message.strip()) | def validate_jinja2(template: str, input_variables: List[str]) ->None:
"""
Validate that the input variables are valid for the template.
Issues a warning if missing or extra variables are found.
Args:
template: The template string.
input_variables: The input variables.
"""
input_variables_set = set(input_variables)
valid_variables = _get_jinja2_variables_from_template(template)
missing_variables = valid_variables - input_variables_set
extra_variables = input_variables_set - valid_variables
warning_message = ''
if missing_variables:
warning_message += f'Missing variables: {missing_variables} '
if extra_variables:
warning_message += f'Extra variables: {extra_variables}'
if warning_message:
warnings.warn(warning_message.strip()) | Validate that the input variables are valid for the template.
Issues a warning if missing or extra variables are found.
Args:
template: The template string.
input_variables: The input variables. |
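`_get_jinja2_variables_from_template` is not shown in this row; a hedged sketch of the jinja2 `meta` API it presumably wraps, which yields the variable set compared against `input_variables`:

```python
from jinja2 import Environment, meta

env = Environment()
ast = env.parse('Tell me a {{ adjective }} joke about {{ content }}.')
print(meta.find_undeclared_variables(ast))  # {'adjective', 'content'}
```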
test_cohere_reranker_init | """Test the cohere reranker initializes correctly."""
CohereRerank() | def test_cohere_reranker_init() ->None:
"""Test the cohere reranker initializes correctly."""
CohereRerank() | Test the cohere reranker initializes correctly. |
get_collection | return CollectionStore.get_by_name(session, self.collection_name) | def get_collection(self, session: Session) ->Optional['CollectionStore']:
return CollectionStore.get_by_name(session, self.collection_name) | null |
similarity_search_by_vector | docs_and_scores = self.similarity_search_with_score_by_vector(embedding, k,
radius, epsilon, timeout, grpc_metadata)
docs = []
for doc, _ in docs_and_scores:
docs.append(doc)
return docs | def similarity_search_by_vector(self, embedding: List[float], k: int=4,
radius: float=-1.0, epsilon: float=0.01, timeout: int=3000000000,
grpc_metadata: Optional[Any]=None, **kwargs: Any) ->List[Document]:
docs_and_scores = self.similarity_search_with_score_by_vector(embedding,
k, radius, epsilon, timeout, grpc_metadata)
docs = []
for doc, _ in docs_and_scores:
docs.append(doc)
return docs | null |
_get_mock_page | return {'id': f'{page_id}', 'title': f'Page {page_id}', 'body': {
f'{content_format.name.lower()}': {'value': f'<p>Content {page_id}</p>'
}}, 'status': 'current', 'type': 'page', '_links': {'self':
f'{self.CONFLUENCE_URL}/rest/api/content/{page_id}', 'tinyui':
'/x/tiny_ui_link', 'editui':
f'/pages/resumedraft.action?draftId={page_id}', 'webui':
f'/spaces/{self.MOCK_SPACE_KEY}/overview'}} | def _get_mock_page(self, page_id: str, content_format: ContentFormat=
ContentFormat.STORAGE) ->Dict:
return {'id': f'{page_id}', 'title': f'Page {page_id}', 'body': {
f'{content_format.name.lower()}': {'value':
f'<p>Content {page_id}</p>'}}, 'status': 'current', 'type': 'page',
'_links': {'self':
f'{self.CONFLUENCE_URL}/rest/api/content/{page_id}', 'tinyui':
'/x/tiny_ui_link', 'editui':
f'/pages/resumedraft.action?draftId={page_id}', 'webui':
f'/spaces/{self.MOCK_SPACE_KEY}/overview'}} | null |
_config_with_context | if any(k.startswith(CONTEXT_CONFIG_PREFIX) for k in config.get(
'configurable', {})):
return config
context_specs = [(spec, i) for i, step in enumerate(steps) for spec in step
.config_specs if spec.id.startswith(CONTEXT_CONFIG_PREFIX)]
grouped_by_key = {key: list(group) for key, group in groupby(sorted(
context_specs, key=lambda s: s[0].id), key=lambda s: _key_from_id(s[0].id))
}
deps_by_key = {key: set(_key_from_id(dep) for spec in group for dep in spec
[0].dependencies or []) for key, group in grouped_by_key.items()}
values: Values = {}
events: DefaultDict[str, Union[asyncio.Event, threading.Event]] = defaultdict(
event_cls)
context_funcs: Dict[str, Callable[[], Any]] = {}
for key, group in grouped_by_key.items():
getters = [s for s in group if s[0].id.endswith(CONTEXT_CONFIG_SUFFIX_GET)]
setters = [s for s in group if s[0].id.endswith(CONTEXT_CONFIG_SUFFIX_SET)]
for dep in deps_by_key[key]:
if key in deps_by_key[dep]:
raise ValueError(
f'Deadlock detected between context keys {key} and {dep}')
if len(setters) != 1:
raise ValueError(f'Expected exactly one setter for context key {key}')
setter_idx = setters[0][1]
if any(getter_idx < setter_idx for _, getter_idx in getters):
raise ValueError(
f'Context setter for key {key} must be defined after all getters.')
if getters:
context_funcs[getters[0][0].id] = partial(getter, events[key], values)
context_funcs[setters[0][0].id] = partial(setter, events[key], values)
return patch_config(config, configurable=context_funcs) | def _config_with_context(config: RunnableConfig, steps: List[Runnable],
setter: Callable, getter: Callable, event_cls: Union[Type[threading.
Event], Type[asyncio.Event]]) ->RunnableConfig:
if any(k.startswith(CONTEXT_CONFIG_PREFIX) for k in config.get(
'configurable', {})):
return config
context_specs = [(spec, i) for i, step in enumerate(steps) for spec in
step.config_specs if spec.id.startswith(CONTEXT_CONFIG_PREFIX)]
grouped_by_key = {key: list(group) for key, group in groupby(sorted(
context_specs, key=lambda s: s[0].id), key=lambda s: _key_from_id(s
[0].id))}
deps_by_key = {key: set(_key_from_id(dep) for spec in group for dep in
spec[0].dependencies or []) for key, group in grouped_by_key.items()}
values: Values = {}
events: DefaultDict[str, Union[asyncio.Event, threading.Event]
] = defaultdict(event_cls)
context_funcs: Dict[str, Callable[[], Any]] = {}
for key, group in grouped_by_key.items():
getters = [s for s in group if s[0].id.endswith(
CONTEXT_CONFIG_SUFFIX_GET)]
setters = [s for s in group if s[0].id.endswith(
CONTEXT_CONFIG_SUFFIX_SET)]
for dep in deps_by_key[key]:
if key in deps_by_key[dep]:
raise ValueError(
f'Deadlock detected between context keys {key} and {dep}')
if len(setters) != 1:
raise ValueError(
f'Expected exactly one setter for context key {key}')
setter_idx = setters[0][1]
if any(getter_idx < setter_idx for _, getter_idx in getters):
raise ValueError(
f'Context setter for key {key} must be defined after all getters.'
)
if getters:
context_funcs[getters[0][0].id] = partial(getter, events[key],
values)
context_funcs[setters[0][0].id] = partial(setter, events[key], values)
return patch_config(config, configurable=context_funcs) | null |
load | iter = self.lazy_load()
if self.show_progress_bar:
try:
from tqdm import tqdm
except ImportError as e:
raise ImportError(
"Package tqdm must be installed if show_progress_bar=True. Please install with 'pip install tqdm' or set show_progress_bar=False."
) from e
iter = tqdm(iter)
return list(iter) | def load(self) ->List[Document]:
iter = self.lazy_load()
if self.show_progress_bar:
try:
from tqdm import tqdm
except ImportError as e:
raise ImportError(
"Package tqdm must be installed if show_progress_bar=True. Please install with 'pip install tqdm' or set show_progress_bar=False."
) from e
iter = tqdm(iter)
return list(iter) | null |
test_pgvector_collection_with_metadata | """Test end to end collection construction"""
pgvector = PGVector(collection_name='test_collection', collection_metadata=
{'foo': 'bar'}, embedding_function=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING, pre_delete_collection=True)
session = Session(pgvector._create_engine())
collection = pgvector.get_collection(session)
if collection is None:
assert False, 'Expected a CollectionStore object but received None'
else:
assert collection.name == 'test_collection'
assert collection.cmetadata == {'foo': 'bar'} | def test_pgvector_collection_with_metadata() ->None:
"""Test end to end collection construction"""
pgvector = PGVector(collection_name='test_collection',
collection_metadata={'foo': 'bar'}, embedding_function=
FakeEmbeddingsWithAdaDimension(), connection_string=
CONNECTION_STRING, pre_delete_collection=True)
session = Session(pgvector._create_engine())
collection = pgvector.get_collection(session)
if collection is None:
assert False, 'Expected a CollectionStore object but received None'
else:
assert collection.name == 'test_collection'
assert collection.cmetadata == {'foo': 'bar'} | Test end to end collection construction |
test_add_recognizer_operator | """
Test adding a recognizer to anonymize a new entity type with a custom operator
"""
from presidio_analyzer import PatternRecognizer
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])
titles_list = ['Sir', 'Madam', 'Professor']
custom_recognizer = PatternRecognizer(supported_entity='TITLE', deny_list=
titles_list)
anonymizer.add_recognizer(custom_recognizer)
text = 'Madam Jane Doe was here.'
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == '<TITLE> Jane Doe was here.'
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])
anonymizer.add_recognizer(custom_recognizer)
custom_operator = {'TITLE': OperatorConfig('replace', {'new_value': 'Dear'})}
anonymizer.add_operators(custom_operator)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == 'Dear Jane Doe was here.' | @pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker')
def test_add_recognizer_operator() ->None:
"""
    Test adding a recognizer to anonymize a new entity type with a custom operator
"""
from presidio_analyzer import PatternRecognizer
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])
titles_list = ['Sir', 'Madam', 'Professor']
custom_recognizer = PatternRecognizer(supported_entity='TITLE',
deny_list=titles_list)
anonymizer.add_recognizer(custom_recognizer)
text = 'Madam Jane Doe was here.'
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == '<TITLE> Jane Doe was here.'
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])
anonymizer.add_recognizer(custom_recognizer)
custom_operator = {'TITLE': OperatorConfig('replace', {'new_value':
'Dear'})}
anonymizer.add_operators(custom_operator)
anonymized_text = anonymizer.anonymize(text)
    assert anonymized_text == 'Dear Jane Doe was here.' | Test adding a recognizer to anonymize a new entity type with a custom operator |
lc_secrets | return {'mistral_api_key': 'MISTRAL_API_KEY'} | @property
def lc_secrets(self) ->Dict[str, str]:
return {'mistral_api_key': 'MISTRAL_API_KEY'} | null |
__del__ | if isinstance(self._bind, sqlalchemy.engine.Connection):
self._bind.close() | def __del__(self) ->None:
if isinstance(self._bind, sqlalchemy.engine.Connection):
self._bind.close() | null |
_check_deprecated_kwargs | """Check for deprecated kwargs."""
deprecated_kwargs = {'redis_host': 'redis_url', 'redis_port': 'redis_url',
'redis_password': 'redis_url', 'content_key': 'index_schema',
'vector_key': 'vector_schema', 'distance_metric': 'vector_schema'}
for key, value in kwargs.items():
if key in deprecated_kwargs:
raise ValueError(
f"Keyword argument '{key}' is deprecated. Please use '{deprecated_kwargs[key]}' instead."
) | def _check_deprecated_kwargs(self, kwargs: Mapping[str, Any]) ->None:
"""Check for deprecated kwargs."""
deprecated_kwargs = {'redis_host': 'redis_url', 'redis_port':
'redis_url', 'redis_password': 'redis_url', 'content_key':
'index_schema', 'vector_key': 'vector_schema', 'distance_metric':
'vector_schema'}
for key, value in kwargs.items():
if key in deprecated_kwargs:
raise ValueError(
f"Keyword argument '{key}' is deprecated. Please use '{deprecated_kwargs[key]}' instead."
) | Check for deprecated kwargs. |
_llm_type | return 'fake-messages-list-chat-model' | @property
def _llm_type(self) ->str:
return 'fake-messages-list-chat-model' | null |
test_sorted_vals | """Test sorted values from dictionary."""
test_dict = {'key2': 'val2', 'key1': 'val1'}
expected_response = ['val1', 'val2']
assert sorted_values(test_dict) == expected_response | def test_sorted_vals() ->None:
"""Test sorted values from dictionary."""
test_dict = {'key2': 'val2', 'key1': 'val1'}
expected_response = ['val1', 'val2']
assert sorted_values(test_dict) == expected_response | Test sorted values from dictionary. |
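`sorted_values` itself is not shown in this row; a guessed implementation consistent with the test above (values returned in key order):

```python
def sorted_values(values: dict) -> list:
    """Return the dict's values ordered by key (a guess at the helper under test)."""
    return [values[key] for key in sorted(values)]


assert sorted_values({'key2': 'val2', 'key1': 'val1'}) == ['val1', 'val2']
```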
_import_tensorflow_datasets | from langchain_community.utilities.tensorflow_datasets import TensorflowDatasets
return TensorflowDatasets | def _import_tensorflow_datasets() ->Any:
from langchain_community.utilities.tensorflow_datasets import TensorflowDatasets
return TensorflowDatasets | null |
test_file_search | """Test the FileSearch tool."""
with TemporaryDirectory() as temp_dir:
tool = FileSearchTool()
file_1 = Path(temp_dir) / 'file1.txt'
file_2 = Path(temp_dir) / 'file2.log'
file_1.write_text('File 1 content')
file_2.write_text('File 2 content')
matches = tool.run({'dir_path': temp_dir, 'pattern': '*.txt'}).split('\n')
assert len(matches) == 1
assert Path(matches[0]).name == 'file1.txt' | def test_file_search() ->None:
"""Test the FileSearch tool."""
with TemporaryDirectory() as temp_dir:
tool = FileSearchTool()
file_1 = Path(temp_dir) / 'file1.txt'
file_2 = Path(temp_dir) / 'file2.log'
file_1.write_text('File 1 content')
file_2.write_text('File 2 content')
matches = tool.run({'dir_path': temp_dir, 'pattern': '*.txt'}).split(
'\n')
assert len(matches) == 1
assert Path(matches[0]).name == 'file1.txt' | Test the FileSearch tool. |
test_all_imports | assert set(__all__) == set(EXPECTED_ALL) | def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL) | null |
apply | """Utilize the LLM generate method for speed gains."""
callback_manager = CallbackManager.configure(callbacks, self.callbacks,
self.verbose)
run_manager = callback_manager.on_chain_start(dumpd(self), {'input_list':
input_list})
try:
response = self.generate(input_list, run_manager=run_manager)
except BaseException as e:
run_manager.on_chain_error(e)
raise e
outputs = self.create_outputs(response)
run_manager.on_chain_end({'outputs': outputs})
return outputs | def apply(self, input_list: List[Dict[str, Any]], callbacks: Callbacks=None
) ->List[Dict[str, str]]:
"""Utilize the LLM generate method for speed gains."""
callback_manager = CallbackManager.configure(callbacks, self.callbacks,
self.verbose)
run_manager = callback_manager.on_chain_start(dumpd(self), {
'input_list': input_list})
try:
response = self.generate(input_list, run_manager=run_manager)
except BaseException as e:
run_manager.on_chain_error(e)
raise e
outputs = self.create_outputs(response)
run_manager.on_chain_end({'outputs': outputs})
return outputs | Utilize the LLM generate method for speed gains. |
create_assistant | """Create an OpenAI Assistant and instantiate the Runnable.
Args:
name: Assistant name.
instructions: Assistant instructions.
tools: Assistant tools. Can be passed in OpenAI format or as BaseTools.
model: Assistant model to use.
client: OpenAI client. Will create default client if not specified.
Returns:
OpenAIAssistantRunnable configured to run using the created assistant.
"""
client = client or _get_openai_client()
openai_tools: List = []
for tool in tools:
oai_tool = tool if isinstance(tool, dict) else format_tool_to_openai_tool(
tool)
openai_tools.append(oai_tool)
assistant = client.beta.assistants.create(name=name, instructions=
instructions, tools=openai_tools, model=model)
return cls(assistant_id=assistant.id, **kwargs) | @classmethod
def create_assistant(cls, name: str, instructions: str, tools: Sequence[
Union[BaseTool, dict]], model: str, *, client: Optional[openai.OpenAI]=
None, **kwargs: Any) ->OpenAIAssistantRunnable:
"""Create an OpenAI Assistant and instantiate the Runnable.
Args:
name: Assistant name.
instructions: Assistant instructions.
tools: Assistant tools. Can be passed in OpenAI format or as BaseTools.
model: Assistant model to use.
client: OpenAI client. Will create default client if not specified.
Returns:
OpenAIAssistantRunnable configured to run using the created assistant.
"""
client = client or _get_openai_client()
openai_tools: List = []
for tool in tools:
oai_tool = tool if isinstance(tool, dict
) else format_tool_to_openai_tool(tool)
openai_tools.append(oai_tool)
assistant = client.beta.assistants.create(name=name, instructions=
instructions, tools=openai_tools, model=model)
return cls(assistant_id=assistant.id, **kwargs) | Create an OpenAI Assistant and instantiate the Runnable.
Args:
name: Assistant name.
instructions: Assistant instructions.
tools: Assistant tools. Can be passed in OpenAI format or as BaseTools.
model: Assistant model to use.
client: OpenAI client. Will create default client if not specified.
Returns:
OpenAIAssistantRunnable configured to run using the created assistant. |
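A hedged call sketch for `create_assistant`, following the docstring above. The import path, model name, and tool choice are assumptions; an `OPENAI_API_KEY` is needed because the default client is created for you:

```python
# Assumed import path -- adjust to your langchain version.
from langchain.agents.openai_assistant import OpenAIAssistantRunnable

assistant = OpenAIAssistantRunnable.create_assistant(
    name='math helper',
    instructions='You are a personal math tutor. Write and run code to answer questions.',
    tools=[{'type': 'code_interpreter'}],  # OpenAI-format tool dict; BaseTools are also accepted
    model='gpt-4-1106-preview',            # hypothetical model choice
)
```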
on_llm_error | """Run when LLM errors."""
self.metrics['step'] += 1
self.metrics['errors'] += 1 | def on_llm_error(self, error: BaseException, **kwargs: Any) ->None:
"""Run when LLM errors."""
self.metrics['step'] += 1
self.metrics['errors'] += 1 | Run when LLM errors. |
expanded | """True if the expander was created with `expanded=True`."""
return self._expanded | @property
def expanded(self) ->bool:
"""True if the expander was created with `expanded=True`."""
return self._expanded | True if the expander was created with `expanded=True`. |
clean_excerpt | """Clean an excerpt from Kendra.
Args:
excerpt: The excerpt to clean.
Returns:
The cleaned excerpt.
"""
if not excerpt:
return excerpt
res = re.sub('\\s+', ' ', excerpt).replace('...', '')
return res | def clean_excerpt(excerpt: str) ->str:
"""Clean an excerpt from Kendra.
Args:
excerpt: The excerpt to clean.
Returns:
The cleaned excerpt.
"""
if not excerpt:
return excerpt
res = re.sub('\\s+', ' ', excerpt).replace('...', '')
return res | Clean an excerpt from Kendra.
Args:
excerpt: The excerpt to clean.
Returns:
The cleaned excerpt. |
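A self-contained check of `clean_excerpt` (the function body is copied from the row above; stdlib only):

```python
import re


def clean_excerpt(excerpt: str) -> str:
    """Collapse whitespace and drop Kendra's '...' truncation markers."""
    if not excerpt:
        return excerpt
    return re.sub('\\s+', ' ', excerpt).replace('...', '')


print(clean_excerpt('Document   excerpt\nfrom Kendra...'))  # Document excerpt from Kendra
```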
test_flat_vector_field_optional_values | """Test optional values for FlatVectorField."""
flat_vector_field_data = {'name': 'example', 'dims': 100, 'algorithm':
'FLAT', 'initial_cap': 1000, 'block_size': 10}
flat_vector = FlatVectorField(**flat_vector_field_data)
assert flat_vector.initial_cap == 1000
assert flat_vector.block_size == 10 | def test_flat_vector_field_optional_values() ->None:
"""Test optional values for FlatVectorField."""
flat_vector_field_data = {'name': 'example', 'dims': 100, 'algorithm':
'FLAT', 'initial_cap': 1000, 'block_size': 10}
flat_vector = FlatVectorField(**flat_vector_field_data)
assert flat_vector.initial_cap == 1000
assert flat_vector.block_size == 10 | Test optional values for FlatVectorField. |
test_text_filter | txt_f = getattr(Text('text_field'), operation)(value)
assert str(txt_f) == expected | @pytest.mark.parametrize('operation, value, expected', [('__eq__', 'text',
'@text_field:("text")'), ('__ne__', 'text', '(-@text_field:"text")'), (
'__eq__', '', '*'), ('__ne__', '', '*'), ('__eq__', None, '*'), (
'__ne__', None, '*'), ('__mod__', 'text', '@text_field:(text)'), (
'__mod__', 'tex*', '@text_field:(tex*)'), ('__mod__', '%text%',
'@text_field:(%text%)'), ('__mod__', '', '*'), ('__mod__', None, '*')],
ids=['eq', 'ne', 'eq-empty', 'ne-empty', 'eq-none', 'ne-none', 'like',
'like_wildcard', 'like_full', 'like_empty', 'like_none'])
def test_text_filter(operation: str, value: Any, expected: str) ->None:
txt_f = getattr(Text('text_field'), operation)(value)
assert str(txt_f) == expected | null |
_parse_lc_messages | return [_parse_lc_message(message) for message in messages] | def _parse_lc_messages(messages: Union[List[BaseMessage], Any]) ->List[Dict
[str, Any]]:
return [_parse_lc_message(message) for message in messages] | null |
_import_json_tool_JsonListKeysTool | from langchain_community.tools.json.tool import JsonListKeysTool
return JsonListKeysTool | def _import_json_tool_JsonListKeysTool() ->Any:
from langchain_community.tools.json.tool import JsonListKeysTool
return JsonListKeysTool | null |
get_run_url | """Get the LangSmith root run URL"""
if not self.latest_run:
raise ValueError('No traced run found.')
for attempt in Retrying(stop=stop_after_attempt(5), wait=
wait_exponential_jitter(), retry=retry_if_exception_type(ls_utils.
LangSmithError)):
with attempt:
return self.client.get_run_url(run=self.latest_run, project_name=
self.project_name)
raise ValueError('Failed to get run URL.') | def get_run_url(self) ->str:
"""Get the LangSmith root run URL"""
if not self.latest_run:
raise ValueError('No traced run found.')
for attempt in Retrying(stop=stop_after_attempt(5), wait=
wait_exponential_jitter(), retry=retry_if_exception_type(ls_utils.
LangSmithError)):
with attempt:
return self.client.get_run_url(run=self.latest_run,
project_name=self.project_name)
raise ValueError('Failed to get run URL.') | Get the LangSmith root run URL |
analyze_text | """Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
complexity_metrics (bool): Whether to compute complexity metrics.
visualize (bool): Whether to visualize the text.
nlp (spacy.lang): The spacy language model to use for visualization.
output_dir (str): The directory to save the visualization files to.
Returns:
(dict): A dictionary containing the complexity metrics and visualization
files serialized in a wandb.Html element.
"""
resp = {}
textstat = import_textstat()
wandb = import_wandb()
spacy = import_spacy()
if complexity_metrics:
text_complexity_metrics = {'flesch_reading_ease': textstat.
flesch_reading_ease(text), 'flesch_kincaid_grade': textstat.
flesch_kincaid_grade(text), 'smog_index': textstat.smog_index(text),
'coleman_liau_index': textstat.coleman_liau_index(text),
'automated_readability_index': textstat.automated_readability_index
(text), 'dale_chall_readability_score': textstat.
dale_chall_readability_score(text), 'difficult_words': textstat.
difficult_words(text), 'linsear_write_formula': textstat.
linsear_write_formula(text), 'gunning_fog': textstat.gunning_fog(
text), 'text_standard': textstat.text_standard(text),
'fernandez_huerta': textstat.fernandez_huerta(text),
'szigriszt_pazos': textstat.szigriszt_pazos(text),
'gutierrez_polini': textstat.gutierrez_polini(text), 'crawford':
textstat.crawford(text), 'gulpease_index': textstat.gulpease_index(
text), 'osman': textstat.osman(text)}
resp.update(text_complexity_metrics)
if visualize and nlp and output_dir is not None:
doc = nlp(text)
dep_out = spacy.displacy.render(doc, style='dep', jupyter=False, page=True)
dep_output_path = Path(output_dir, hash_string(f'dep-{text}') + '.html')
dep_output_path.open('w', encoding='utf-8').write(dep_out)
ent_out = spacy.displacy.render(doc, style='ent', jupyter=False, page=True)
ent_output_path = Path(output_dir, hash_string(f'ent-{text}') + '.html')
ent_output_path.open('w', encoding='utf-8').write(ent_out)
text_visualizations = {'dependency_tree': wandb.Html(str(
dep_output_path)), 'entities': wandb.Html(str(ent_output_path))}
resp.update(text_visualizations)
return resp | def analyze_text(text: str, complexity_metrics: bool=True, visualize: bool=
True, nlp: Any=None, output_dir: Optional[Union[str, Path]]=None) ->dict:
"""Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
complexity_metrics (bool): Whether to compute complexity metrics.
visualize (bool): Whether to visualize the text.
nlp (spacy.lang): The spacy language model to use for visualization.
output_dir (str): The directory to save the visualization files to.
Returns:
(dict): A dictionary containing the complexity metrics and visualization
files serialized in a wandb.Html element.
"""
resp = {}
textstat = import_textstat()
wandb = import_wandb()
spacy = import_spacy()
if complexity_metrics:
text_complexity_metrics = {'flesch_reading_ease': textstat.
flesch_reading_ease(text), 'flesch_kincaid_grade': textstat.
flesch_kincaid_grade(text), 'smog_index': textstat.smog_index(
text), 'coleman_liau_index': textstat.coleman_liau_index(text),
'automated_readability_index': textstat.
automated_readability_index(text),
'dale_chall_readability_score': textstat.
dale_chall_readability_score(text), 'difficult_words': textstat
.difficult_words(text), 'linsear_write_formula': textstat.
linsear_write_formula(text), 'gunning_fog': textstat.
gunning_fog(text), 'text_standard': textstat.text_standard(text
), 'fernandez_huerta': textstat.fernandez_huerta(text),
'szigriszt_pazos': textstat.szigriszt_pazos(text),
'gutierrez_polini': textstat.gutierrez_polini(text), 'crawford':
textstat.crawford(text), 'gulpease_index': textstat.
gulpease_index(text), 'osman': textstat.osman(text)}
resp.update(text_complexity_metrics)
if visualize and nlp and output_dir is not None:
doc = nlp(text)
dep_out = spacy.displacy.render(doc, style='dep', jupyter=False,
page=True)
dep_output_path = Path(output_dir, hash_string(f'dep-{text}') + '.html'
)
dep_output_path.open('w', encoding='utf-8').write(dep_out)
ent_out = spacy.displacy.render(doc, style='ent', jupyter=False,
page=True)
ent_output_path = Path(output_dir, hash_string(f'ent-{text}') + '.html'
)
ent_output_path.open('w', encoding='utf-8').write(ent_out)
text_visualizations = {'dependency_tree': wandb.Html(str(
dep_output_path)), 'entities': wandb.Html(str(ent_output_path))}
resp.update(text_visualizations)
return resp | Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
complexity_metrics (bool): Whether to compute complexity metrics.
visualize (bool): Whether to visualize the text.
nlp (spacy.lang): The spacy language model to use for visualization.
output_dir (str): The directory to save the visualization files to.
Returns:
(dict): A dictionary containing the complexity metrics and visualization
files serialized in a wandb.Html element. |
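A minimal usage sketch for the helper above, assuming it is importable from its defining module and that textstat, spacy, and wandb are installed; the spacy model name and output directory are illustrative assumptions, not values taken from the source.

# Illustrative sketch only -- names below are assumptions for demonstration.
import spacy

nlp = spacy.load('en_core_web_sm')  # assumed small English pipeline
metrics = analyze_text(
    'LangChain composes language-model calls into chains.',
    complexity_metrics=True,
    visualize=True,
    nlp=nlp,
    output_dir='./text_viz',  # hypothetical directory for the HTML renders
)
print(metrics['flesch_reading_ease'])  # one of the textstat metrics computed above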
operator | return x['a'] == y['a'] | def operator(x: dict, y: dict) ->bool:
return x['a'] == y['a'] | null |
test_all_imports | assert set(__all__) == set(EXPECTED_ALL) | def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL) | null |
pick | """Pick keys from the dict output of this runnable.
Returns a new runnable."""
from langchain_core.runnables.passthrough import RunnablePick
return self | RunnablePick(keys) | def pick(self, keys: Union[str, List[str]]) ->RunnableSerializable[Any, Any]:
"""Pick keys from the dict output of this runnable.
Returns a new runnable."""
from langchain_core.runnables.passthrough import RunnablePick
return self | RunnablePick(keys) | Pick keys from the dict output of this runnable.
Returns a new runnable. |
is_lc_serializable | return True | @classmethod
def is_lc_serializable(cls) ->bool:
return True | null |
preview | """Same as run, but instead of actually executing the action, will
instead return a preview of params that have been guessed by the AI in
case you need to explicitly review before executing."""
session = self._get_session()
params = params if params else {}
params.update({'preview_only': True})
request = self._create_action_request(action_id, instructions, params, True)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()['input_params'] | def preview(self, action_id: str, instructions: str, params: Optional[Dict]
=None) ->Dict:
"""Same as run, but instead of actually executing the action, will
instead return a preview of params that have been guessed by the AI in
case you need to explicitly review before executing."""
session = self._get_session()
params = params if params else {}
params.update({'preview_only': True})
request = self._create_action_request(action_id, instructions, params, True
)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()['input_params'] | Same as run, but instead of actually executing the action, will
instead return a preview of params that have been guessed by the AI in
case you need to explicitly review before executing. |
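A hedged sketch of calling preview on an instance exposing the method above (e.g. an already-authenticated natural-language-actions wrapper); the action id and instructions are placeholders, not real identifiers.

# Illustrative only: `wrapper` stands in for an authenticated object
# providing the preview() method shown above.
guessed = wrapper.preview(
    action_id='00000000-0000-0000-0000-000000000000',  # placeholder action id
    instructions='Send a Slack message to #general saying the build passed',
)
print(guessed)  # inspect the AI-guessed params before calling run()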
is_lc_serializable | return False | @classmethod
def is_lc_serializable(cls) ->bool:
return False | null |
refresh_schema | """Refreshes the graph schema information."""
pass | @abstractmethod
def refresh_schema(self) ->None:
"""Refreshes the graph schema information."""
pass | Refreshes the graph schema information. |
test_load_no_result | docs = retriever.get_relevant_documents('1605.08386WWW')
assert not docs | def test_load_no_result(retriever: ArxivRetriever) ->None:
docs = retriever.get_relevant_documents('1605.08386WWW')
assert not docs | null |
similarity_search_by_vector | return [doc for doc, _ in self.similarity_search_with_score_by_vector(
embedding, k, filter=filter)] | def similarity_search_by_vector(self, embedding: List[float], k: int=4,
filter: Optional[Dict[str, str]]=None, **kwargs: Any) ->List[Document]:
return [doc for doc, _ in self.similarity_search_with_score_by_vector(
embedding, k, filter=filter)] | null |
embed_dict_type | """Helper function to embed a dictionary item."""
inner_dict: Dict = {}
for ns, embed_item in item.items():
if isinstance(embed_item, list):
inner_dict[ns] = []
for embed_list_item in embed_item:
embedded = embed_string_type(embed_list_item, model, ns)
inner_dict[ns].append(embedded[ns])
else:
inner_dict.update(embed_string_type(embed_item, model, ns))
return inner_dict | def embed_dict_type(item: Dict, model: Any) ->Dict[str, Any]:
"""Helper function to embed a dictionary item."""
inner_dict: Dict = {}
for ns, embed_item in item.items():
if isinstance(embed_item, list):
inner_dict[ns] = []
for embed_list_item in embed_item:
embedded = embed_string_type(embed_list_item, model, ns)
inner_dict[ns].append(embedded[ns])
else:
inner_dict.update(embed_string_type(embed_item, model, ns))
return inner_dict | Helper function to embed a dictionary item. |
get_lc_namespace | """Get the namespace of the langchain object."""
return ['langchain', 'schema', 'runnable'] | @classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'runnable'] | Get the namespace of the langchain object. |
_import_interaction_tool | from langchain_community.tools.interaction.tool import StdInInquireTool
return StdInInquireTool | def _import_interaction_tool() ->Any:
from langchain_community.tools.interaction.tool import StdInInquireTool
return StdInInquireTool | null |
test_chat_baichuan_with_temperature | chat = ChatBaichuan(model='Baichuan2-13B', temperature=1.0)
message = HumanMessage(content='Hello')
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str) | def test_chat_baichuan_with_temperature() ->None:
chat = ChatBaichuan(model='Baichuan2-13B', temperature=1.0)
message = HumanMessage(content='Hello')
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str) | null |
add_files | """
LLMRails provides a way to add documents directly via our API where
pre-processing and chunking occurs internally in an optimal way
This method provides a way to use that API in LangChain
Args:
files_list: Iterable of strings, each representing a local file path.
Files could be text, HTML, PDF, markdown, doc/docx, ppt/pptx, etc.
see API docs for full list
Returns:
List of ids associated with each of the files indexed
"""
files = []
for file in files_list:
if not os.path.exists(file):
logging.error(f'File {file} does not exist, skipping')
continue
files.append(('file', (os.path.basename(file), open(file, 'rb'))))
response = self._session.post(
f'{self.base_url}/datastores/{self._datastore_id}/file', files=files,
verify=True, headers=self._get_post_headers())
if response.status_code != 200:
logging.error(
f'Create request failed for datastore = {self._datastore_id} with status code {response.status_code}, reason {response.reason}, text {response.text}'
)
return False
return True | def add_files(self, files_list: Iterable[str], metadatas: Optional[List[
dict]]=None, **kwargs: Any) ->bool:
"""
LLMRails provides a way to add documents directly via our API where
pre-processing and chunking occurs internally in an optimal way
This method provides a way to use that API in LangChain
Args:
files_list: Iterable of strings, each representing a local file path.
Files could be text, HTML, PDF, markdown, doc/docx, ppt/pptx, etc.
see API docs for full list
Returns:
List of ids associated with each of the files indexed
"""
files = []
for file in files_list:
if not os.path.exists(file):
logging.error(f'File {file} does not exist, skipping')
continue
files.append(('file', (os.path.basename(file), open(file, 'rb'))))
response = self._session.post(
f'{self.base_url}/datastores/{self._datastore_id}/file', files=
files, verify=True, headers=self._get_post_headers())
if response.status_code != 200:
logging.error(
f'Create request failed for datastore = {self._datastore_id} with status code {response.status_code}, reason {response.reason}, text {response.text}'
)
return False
return True | LLMRails provides a way to add documents directly via our API where
pre-processing and chunking occurs internally in an optimal way
This method provides a way to use that API in LangChain
Args:
files_list: Iterable of strings, each representing a local file path.
Files could be text, HTML, PDF, markdown, doc/docx, ppt/pptx, etc.
see API docs for full list
Returns:
List of ids associated with each of the files indexed |
close | """Close any loggers to allow writing out of any profiles before exiting."""
if self._logger and hasattr(self._logger, 'close'):
self._logger.close()
diagnostic_logger.info('Closing WhyLabs logger, see you next time!') | def close(self) ->None:
"""Close any loggers to allow writing out of any profiles before exiting."""
if self._logger and hasattr(self._logger, 'close'):
self._logger.close()
diagnostic_logger.info('Closing WhyLabs logger, see you next time!') | Close any loggers to allow writing out of any profiles before exiting. |
functions | return [dict(format_tool_to_openai_function(t)) for t in self.tools] | @property
def functions(self) ->List[dict]:
return [dict(format_tool_to_openai_function(t)) for t in self.tools] | null |
_generate | should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(messages, stop=stop, run_manager=run_manager,
**kwargs)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **{'stream': stream} if stream is not None else {}, **
kwargs}
response = self.completion_with_retry(messages=message_dicts, run_manager=
run_manager, **params)
return self._create_chat_result(response) | def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, stream:
Optional[bool]=None, **kwargs: Any) ->ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(messages, stop=stop, run_manager=
run_manager, **kwargs)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **{'stream': stream} if stream is not None else {},
**kwargs}
response = self.completion_with_retry(messages=message_dicts,
run_manager=run_manager, **params)
return self._create_chat_result(response) | null |
_key_from_id | wout_prefix = id_.split(CONTEXT_CONFIG_PREFIX, maxsplit=1)[1]
if wout_prefix.endswith(CONTEXT_CONFIG_SUFFIX_GET):
return wout_prefix[:-len(CONTEXT_CONFIG_SUFFIX_GET)]
elif wout_prefix.endswith(CONTEXT_CONFIG_SUFFIX_SET):
return wout_prefix[:-len(CONTEXT_CONFIG_SUFFIX_SET)]
else:
raise ValueError(f'Invalid context config id {id_}') | def _key_from_id(id_: str) ->str:
wout_prefix = id_.split(CONTEXT_CONFIG_PREFIX, maxsplit=1)[1]
if wout_prefix.endswith(CONTEXT_CONFIG_SUFFIX_GET):
return wout_prefix[:-len(CONTEXT_CONFIG_SUFFIX_GET)]
elif wout_prefix.endswith(CONTEXT_CONFIG_SUFFIX_SET):
return wout_prefix[:-len(CONTEXT_CONFIG_SUFFIX_SET)]
else:
raise ValueError(f'Invalid context config id {id_}') | null |
_parse_document | return Document(page_content=data.pop(self._text_field), metadata=data.pop(
self._metadata_field) if self._metadata_field else data) | def _parse_document(self, data: dict) ->Document:
return Document(page_content=data.pop(self._text_field), metadata=data.
pop(self._metadata_field) if self._metadata_field else data) | null |
_run | """Use the tool."""
all_params = {'file_url': query, 'language': self.language, 'speakers':
self.speakers, 'profanity_filter': self.profanity_filter,
'custom_vocabulary': self.custom_vocabulary}
query_params = {k: v for k, v in all_params.items() if v is not None}
job_id = self._call_eden_ai(query_params)
url = self.base_url + job_id
audio_analysis_result = self._wait_processing(url)
result = audio_analysis_result.text
formatted_text = json.loads(result)
return formatted_text['results'][self.providers[0]]['text'] | def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the tool."""
all_params = {'file_url': query, 'language': self.language, 'speakers':
self.speakers, 'profanity_filter': self.profanity_filter,
'custom_vocabulary': self.custom_vocabulary}
query_params = {k: v for k, v in all_params.items() if v is not None}
job_id = self._call_eden_ai(query_params)
url = self.base_url + job_id
audio_analysis_result = self._wait_processing(url)
result = audio_analysis_result.text
formatted_text = json.loads(result)
return formatted_text['results'][self.providers[0]]['text'] | Use the tool. |
test_duckdb_loader_no_options | """Test DuckDB loader."""
loader = DuckDBLoader('SELECT 1 AS a, 2 AS b')
docs = loader.load()
assert len(docs) == 1
assert docs[0].page_content == 'a: 1\nb: 2'
assert docs[0].metadata == {} | @unittest.skipIf(not duckdb_installed, 'duckdb not installed')
def test_duckdb_loader_no_options() ->None:
"""Test DuckDB loader."""
loader = DuckDBLoader('SELECT 1 AS a, 2 AS b')
docs = loader.load()
assert len(docs) == 1
assert docs[0].page_content == 'a: 1\nb: 2'
assert docs[0].metadata == {} | Test DuckDB loader. |
test_multi_output_errors | """Test simple sequential errors if multiple output variables are expected."""
chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar', 'grok'])
chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz'])
with pytest.raises(ValueError):
SimpleSequentialChain(chains=[chain_1, chain_2]) | def test_multi_output_errors() ->None:
"""Test simple sequential errors if multiple output variables are expected."""
chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar',
'grok'])
chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz'])
with pytest.raises(ValueError):
SimpleSequentialChain(chains=[chain_1, chain_2]) | Test simple sequential errors if multiple output variables are expected. |
_import_serpapi | from langchain_community.utilities.serpapi import SerpAPIWrapper
return SerpAPIWrapper | def _import_serpapi() ->Any:
from langchain_community.utilities.serpapi import SerpAPIWrapper
return SerpAPIWrapper | null |
_get_relevant_documents | if self.get_full_documents:
return self.load(query=query)
else:
return self.get_summaries_as_docs(query) | def _get_relevant_documents(self, query: str, *, run_manager:
CallbackManagerForRetrieverRun) ->List[Document]:
if self.get_full_documents:
return self.load(query=query)
else:
return self.get_summaries_as_docs(query) | null |
push | """
Pushes an object to the hub and returns the URL it can be viewed at in a browser.
:param repo_full_name: The full name of the repo to push to in the format of
`owner/repo`.
:param object: The LangChain to serialize and push to the hub.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
:param parent_commit_hash: The commit hash of the parent commit to push to. Defaults
to the latest commit automatically.
:param new_repo_is_public: Whether the repo should be public. Defaults to
True (Public by default).
:param new_repo_description: The description of the repo. Defaults to an empty
string.
"""
client = _get_client(api_url=api_url, api_key=api_key)
manifest_json = dumps(object)
message = client.push(repo_full_name, manifest_json, parent_commit_hash=
parent_commit_hash, new_repo_is_public=new_repo_is_public,
new_repo_description=new_repo_description)
return message | def push(repo_full_name: str, object: Any, *, api_url: Optional[str]=None,
api_key: Optional[str]=None, parent_commit_hash: Optional[str]='latest',
new_repo_is_public: bool=True, new_repo_description: str='') ->str:
"""
Pushes an object to the hub and returns the URL it can be viewed at in a browser.
:param repo_full_name: The full name of the repo to push to in the format of
`owner/repo`.
:param object: The LangChain to serialize and push to the hub.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
:param parent_commit_hash: The commit hash of the parent commit to push to. Defaults
to the latest commit automatically.
:param new_repo_is_public: Whether the repo should be public. Defaults to
True (Public by default).
:param new_repo_description: The description of the repo. Defaults to an empty
string.
"""
client = _get_client(api_url=api_url, api_key=api_key)
manifest_json = dumps(object)
message = client.push(repo_full_name, manifest_json, parent_commit_hash
=parent_commit_hash, new_repo_is_public=new_repo_is_public,
new_repo_description=new_repo_description)
return message | Pushes an object to the hub and returns the URL it can be viewed at in a browser.
:param repo_full_name: The full name of the repo to push to in the format of
`owner/repo`.
:param object: The LangChain to serialize and push to the hub.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
:param parent_commit_hash: The commit hash of the parent commit to push to. Defaults
to the latest commit automatically.
:param new_repo_is_public: Whether the repo should be public. Defaults to
True (Public by default).
:param new_repo_description: The description of the repo. Defaults to an empty
string. |
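A short usage sketch, assuming a LangChain Hub API key is already configured in the environment; the repo handle below is a placeholder, not a real repository.

# Illustrative only: pushes a serializable prompt object to a hypothetical repo.
from langchain import hub
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_template('Tell me a joke about {topic}')
result = hub.push('my-handle/joke-prompt', prompt)  # placeholder repo_full_name
print(result)  # per the docstring above, a URL where the pushed object can be viewed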
create_openai_tools_agent | """Create an agent that uses OpenAI tools.
Examples:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_tools_agent
prompt = hub.pull("hwchase17/openai-tools-agent")
model = ChatOpenAI()
tools = ...
agent = create_openai_tools_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use, must have input keys of `agent_scratchpad`.
Returns:
A runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
"""
missing_vars = {'agent_scratchpad'}.difference(prompt.input_variables)
if missing_vars:
raise ValueError(f'Prompt missing required variables: {missing_vars}')
llm_with_tools = llm.bind(tools=[format_tool_to_openai_tool(tool) for tool in
tools])
agent = RunnablePassthrough.assign(agent_scratchpad=lambda x:
format_to_openai_tool_messages(x['intermediate_steps'])
) | prompt | llm_with_tools | OpenAIToolsAgentOutputParser()
return agent | def create_openai_tools_agent(llm: BaseLanguageModel, tools: Sequence[
BaseTool], prompt: ChatPromptTemplate) ->Runnable:
"""Create an agent that uses OpenAI tools.
Examples:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_tools_agent
prompt = hub.pull("hwchase17/openai-tools-agent")
model = ChatOpenAI()
tools = ...
agent = create_openai_tools_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use, must have input keys of `agent_scratchpad`.
Returns:
A runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
"""
missing_vars = {'agent_scratchpad'}.difference(prompt.input_variables)
if missing_vars:
raise ValueError(f'Prompt missing required variables: {missing_vars}')
llm_with_tools = llm.bind(tools=[format_tool_to_openai_tool(tool) for
tool in tools])
agent = RunnablePassthrough.assign(agent_scratchpad=lambda x:
format_to_openai_tool_messages(x['intermediate_steps'])
) | prompt | llm_with_tools | OpenAIToolsAgentOutputParser()
return agent | Create an agent that uses OpenAI tools.
Examples:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_tools_agent
prompt = hub.pull("hwchase17/openai-tools-agent")
model = ChatOpenAI()
tools = ...
agent = create_openai_tools_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use, must have input keys of `agent_scratchpad`.
Returns:
A runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish. |
_llm_type | return 'fake-llm' | @property
def _llm_type(self) ->str:
return 'fake-llm' | null |
_import_textgen | from langchain_community.llms.textgen import TextGen
return TextGen | def _import_textgen() ->Any:
from langchain_community.llms.textgen import TextGen
return TextGen | null |
test_api_key_is_string | llm = EdenAiEmbeddings(edenai_api_key='secret-api-key')
assert isinstance(llm.edenai_api_key, SecretStr) | def test_api_key_is_string() ->None:
llm = EdenAiEmbeddings(edenai_api_key='secret-api-key')
assert isinstance(llm.edenai_api_key, SecretStr) | null |
create_chat_prompt_template | """Create a chat prompt template."""
return ChatPromptTemplate(input_variables=['foo', 'bar', 'context'],
messages=create_messages()) | def create_chat_prompt_template() ->ChatPromptTemplate:
"""Create a chat prompt template."""
return ChatPromptTemplate(input_variables=['foo', 'bar', 'context'],
messages=create_messages()) | Create a chat prompt template. |
_make_with_name | def _make_tool(dec_func: Union[Callable, Runnable]) ->BaseTool:
if isinstance(dec_func, Runnable):
runnable = dec_func
if runnable.input_schema.schema().get('type') != 'object':
raise ValueError('Runnable must have an object schema.')
async def ainvoke_wrapper(callbacks: Optional[Callbacks]=None, **
kwargs: Any) ->Any:
return await runnable.ainvoke(kwargs, {'callbacks': callbacks})
def invoke_wrapper(callbacks: Optional[Callbacks]=None, **kwargs: Any
) ->Any:
return runnable.invoke(kwargs, {'callbacks': callbacks})
coroutine = ainvoke_wrapper
func = invoke_wrapper
schema: Optional[Type[BaseModel]] = runnable.input_schema
description = repr(runnable)
elif inspect.iscoroutinefunction(dec_func):
coroutine = dec_func
func = None
schema = args_schema
description = None
else:
coroutine = None
func = dec_func
schema = args_schema
description = None
if infer_schema or args_schema is not None:
return StructuredTool.from_function(func, coroutine, name=tool_name,
description=description, return_direct=return_direct,
args_schema=schema, infer_schema=infer_schema)
if func.__doc__ is None:
raise ValueError(
'Function must have a docstring if description not provided and infer_schema is False.'
)
return Tool(name=tool_name, func=func, description=f'{tool_name} tool',
return_direct=return_direct, coroutine=coroutine)
return _make_tool | def _make_with_name(tool_name: str) ->Callable:
def _make_tool(dec_func: Union[Callable, Runnable]) ->BaseTool:
if isinstance(dec_func, Runnable):
runnable = dec_func
if runnable.input_schema.schema().get('type') != 'object':
raise ValueError('Runnable must have an object schema.')
async def ainvoke_wrapper(callbacks: Optional[Callbacks]=None,
**kwargs: Any) ->Any:
return await runnable.ainvoke(kwargs, {'callbacks': callbacks})
def invoke_wrapper(callbacks: Optional[Callbacks]=None, **
kwargs: Any) ->Any:
return runnable.invoke(kwargs, {'callbacks': callbacks})
coroutine = ainvoke_wrapper
func = invoke_wrapper
schema: Optional[Type[BaseModel]] = runnable.input_schema
description = repr(runnable)
elif inspect.iscoroutinefunction(dec_func):
coroutine = dec_func
func = None
schema = args_schema
description = None
else:
coroutine = None
func = dec_func
schema = args_schema
description = None
if infer_schema or args_schema is not None:
return StructuredTool.from_function(func, coroutine, name=
tool_name, description=description, return_direct=
return_direct, args_schema=schema, infer_schema=infer_schema)
if func.__doc__ is None:
raise ValueError(
'Function must have a docstring if description not provided and infer_schema is False.'
)
return Tool(name=tool_name, func=func, description=
f'{tool_name} tool', return_direct=return_direct, coroutine=
coroutine)
return _make_tool | null |
on_llm_new_token | """Run on new LLM token. Only available when streaming is enabled."""
self.append_to_last_tokens(token)
if self.check_if_answer_reached():
self.answer_reached = True
if self.stream_prefix:
for t in self.last_tokens:
sys.stdout.write(t)
sys.stdout.flush()
return
if self.answer_reached:
sys.stdout.write(token)
sys.stdout.flush() | def on_llm_new_token(self, token: str, **kwargs: Any) ->None:
"""Run on new LLM token. Only available when streaming is enabled."""
self.append_to_last_tokens(token)
if self.check_if_answer_reached():
self.answer_reached = True
if self.stream_prefix:
for t in self.last_tokens:
sys.stdout.write(t)
sys.stdout.flush()
return
if self.answer_reached:
sys.stdout.write(token)
sys.stdout.flush() | Run on new LLM token. Only available when streaming is enabled. |
select_examples | """Select which examples to use based on the inputs."""
return list(self.examples) | def select_examples(self, input_variables: Dict[str, str]) ->List[dict]:
"""Select which examples to use based on the inputs."""
return list(self.examples) | Select which examples to use based on the inputs. |
__init__ | """Initialize the tracer."""
super().__init__()
self.runs: List[Run] = [] | def __init__(self) ->None:
"""Initialize the tracer."""
super().__init__()
self.runs: List[Run] = [] | Initialize the tracer. |
invoke_delete_with_no_args | vectorstore: AzureCosmosDBVectorSearch = (AzureCosmosDBVectorSearch.
from_connection_string(CONNECTION_STRING, NAMESPACE,
azure_openai_embeddings, index_name=INDEX_NAME))
return vectorstore.delete() | def invoke_delete_with_no_args(self, azure_openai_embeddings:
OpenAIEmbeddings, collection: Any) ->Optional[bool]:
vectorstore: AzureCosmosDBVectorSearch = (AzureCosmosDBVectorSearch.
from_connection_string(CONNECTION_STRING, NAMESPACE,
azure_openai_embeddings, index_name=INDEX_NAME))
return vectorstore.delete() | null |
load | """Load documents from EverNote export file."""
documents = [Document(page_content=note['content'], metadata={**{key: value for
key, value in note.items() if key not in ['content', 'content-raw',
'resource']}, **{'source': self.file_path}}) for note in self.
_parse_note_xml(self.file_path) if note.get('content') is not None]
if not self.load_single_document:
return documents
return [Document(page_content=''.join([document.page_content for document in
documents]), metadata={'source': self.file_path})] | def load(self) ->List[Document]:
"""Load documents from EverNote export file."""
documents = [Document(page_content=note['content'], metadata={**{key:
value for key, value in note.items() if key not in ['content',
'content-raw', 'resource']}, **{'source': self.file_path}}) for
note in self._parse_note_xml(self.file_path) if note.get('content')
is not None]
if not self.load_single_document:
return documents
return [Document(page_content=''.join([document.page_content for
document in documents]), metadata={'source': self.file_path})] | Load documents from EverNote export file. |
test_larksuite_doc_loader | """Test LarkSuite (FeiShu) document loader."""
loader = LarkSuiteDocLoader(DOMAIN, ACCESS_TOKEN, DOCUMENT_ID)
docs = loader.load()
assert len(docs) == 1
assert docs[0].page_content is not None | def test_larksuite_doc_loader() ->None:
"""Test LarkSuite (FeiShu) document loader."""
loader = LarkSuiteDocLoader(DOMAIN, ACCESS_TOKEN, DOCUMENT_ID)
docs = loader.load()
assert len(docs) == 1
assert docs[0].page_content is not None | Test LarkSuite (FeiShu) document loader. |