method_name (string, 1–78 chars) | method_body (string, 3–9.66k chars) | full_code (string, 31–10.7k chars) | docstring (string, 4–4.74k chars, nullable ⌀)
---|---|---|---|
_import_slack_get_message | from langchain_community.tools.slack.get_message import SlackGetMessage
return SlackGetMessage | def _import_slack_get_message() ->Any:
from langchain_community.tools.slack.get_message import SlackGetMessage
return SlackGetMessage | null |
_import_google_search | from langchain_community.utilities.google_search import GoogleSearchAPIWrapper
return GoogleSearchAPIWrapper | def _import_google_search() ->Any:
from langchain_community.utilities.google_search import GoogleSearchAPIWrapper
return GoogleSearchAPIWrapper | null |
test_wrapper_api_key_initialization | """Test Wrapper initializes with an API Key."""
ZapierNLAWrapper(zapier_nla_api_key='test') | def test_wrapper_api_key_initialization() ->None:
"""Test Wrapper initializes with an API Key."""
ZapierNLAWrapper(zapier_nla_api_key='test') | Test Wrapper initializes with an API Key. |
_create_child | """Create a new child with the given params"""
if type == ChildType.MARKDOWN:
self.markdown(**kwargs)
elif type == ChildType.EXCEPTION:
self.exception(**kwargs)
else:
raise RuntimeError(f'Unexpected child type {type}') | def _create_child(self, type: ChildType, kwargs: Dict[str, Any]) ->None:
"""Create a new child with the given params"""
if type == ChildType.MARKDOWN:
self.markdown(**kwargs)
elif type == ChildType.EXCEPTION:
self.exception(**kwargs)
else:
raise RuntimeError(f'Unexpected child type {type}') | Create a new child with the given params |
similarity_search_with_score | raise NotImplementedError | def similarity_search_with_score(self, *args: Any, **kwargs: Any) ->List[Tuple
[Document, float]]:
raise NotImplementedError | null |
test_promptlayer_openai_chat_call | """Test valid call to promptlayer openai."""
llm = PromptLayerOpenAIChat(max_tokens=10)
output = llm('Say foo:')
assert isinstance(output, str) | def test_promptlayer_openai_chat_call() ->None:
"""Test valid call to promptlayer openai."""
llm = PromptLayerOpenAIChat(max_tokens=10)
output = llm('Say foo:')
assert isinstance(output, str) | Test valid call to promptlayer openai. |
on_chain_start | """If the key `input` is in `inputs`, then save it in `self.prompts` using
either the `parent_run_id` or the `run_id` as the key. This is done so that
we don't log the same input prompt twice, once when the LLM starts and once
when the chain starts.
"""
if 'input' in inputs:
self.prompts.update({str(kwargs['parent_run_id'] or kwargs['run_id']):
inputs['input'] if isinstance(inputs['input'], list) else [inputs[
'input']]}) | def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any],
**kwargs: Any) ->None:
"""If the key `input` is in `inputs`, then save it in `self.prompts` using
either the `parent_run_id` or the `run_id` as the key. This is done so that
we don't log the same input prompt twice, once when the LLM starts and once
when the chain starts.
"""
if 'input' in inputs:
self.prompts.update({str(kwargs['parent_run_id'] or kwargs['run_id'
]): inputs['input'] if isinstance(inputs['input'], list) else [
inputs['input']]}) | If the key `input` is in `inputs`, then save it in `self.prompts` using
either the `parent_run_id` or the `run_id` as the key. This is done so that
we don't log the same input prompt twice, once when the LLM starts and once
when the chain starts. |
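A toy, standalone sketch of the bookkeeping described above (the run ids and prompt text are invented): the chain input is stored once, keyed by `parent_run_id` when present and by `run_id` otherwise, so the same prompt is not logged again when the LLM starts.

```python
# Toy illustration (made-up ids): store the chain input once, keyed by
# parent_run_id when available, otherwise by run_id.
prompts = {}
inputs = {"input": "What is 2 + 2?"}
kwargs = {"parent_run_id": None, "run_id": "run-123"}

if "input" in inputs:
    key = str(kwargs["parent_run_id"] or kwargs["run_id"])
    value = inputs["input"] if isinstance(inputs["input"], list) else [inputs["input"]]
    prompts.update({key: value})

print(prompts)  # {'run-123': ['What is 2 + 2?']}
```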
_low_confidence_spans | _low_idx = np.where(np.exp(log_probs) < min_prob)[0]
low_idx = [i for i in _low_idx if re.search('\\w', tokens[i])]
if len(low_idx) == 0:
return []
spans = [[low_idx[0], low_idx[0] + num_pad_tokens + 1]]
for i, idx in enumerate(low_idx[1:]):
end = idx + num_pad_tokens + 1
if idx - low_idx[i] < min_token_gap:
spans[-1][1] = end
else:
spans.append([idx, end])
return [''.join(tokens[start:end]) for start, end in spans] | def _low_confidence_spans(tokens: Sequence[str], log_probs: Sequence[float],
min_prob: float, min_token_gap: int, num_pad_tokens: int) ->List[str]:
_low_idx = np.where(np.exp(log_probs) < min_prob)[0]
low_idx = [i for i in _low_idx if re.search('\\w', tokens[i])]
if len(low_idx) == 0:
return []
spans = [[low_idx[0], low_idx[0] + num_pad_tokens + 1]]
for i, idx in enumerate(low_idx[1:]):
end = idx + num_pad_tokens + 1
if idx - low_idx[i] < min_token_gap:
spans[-1][1] = end
else:
spans.append([idx, end])
return [''.join(tokens[start:end]) for start, end in spans] | null |
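A minimal standalone sketch of the span-grouping step above, run on toy tokens and probabilities (all values invented): low-probability word tokens closer together than `min_token_gap` are merged into one padded span.

```python
# Toy data only: group low-probability tokens into padded spans, mirroring
# the _low_confidence_spans logic shown above.
import re
import numpy as np

tokens = ["The", " capital", " of", " Fr", "ance", " is", " Paris"]
log_probs = np.log([0.9, 0.9, 0.9, 0.2, 0.3, 0.9, 0.25])
min_prob, min_token_gap, num_pad_tokens = 0.5, 2, 1

low_idx = [i for i in np.where(np.exp(log_probs) < min_prob)[0] if re.search(r"\w", tokens[i])]
spans = [[low_idx[0], low_idx[0] + num_pad_tokens + 1]]
for prev, idx in zip(low_idx, low_idx[1:]):
    end = idx + num_pad_tokens + 1
    if idx - prev < min_token_gap:
        spans[-1][1] = end        # close enough: extend the previous span
    else:
        spans.append([idx, end])  # far apart: start a new span

print(["".join(tokens[start:end]) for start, end in spans])  # [' France is', ' Paris']
```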
embed_documents | """Compute doc embeddings using a HuggingFace instruct model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = [[self.embed_instruction, text] for text in texts]
embeddings = self.client.encode(instruction_pairs, **self.encode_kwargs)
return embeddings.tolist() | def embed_documents(self, texts: List[str]) ->List[List[float]]:
"""Compute doc embeddings using a HuggingFace instruct model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = [[self.embed_instruction, text] for text in texts]
embeddings = self.client.encode(instruction_pairs, **self.encode_kwargs)
return embeddings.tolist() | Compute doc embeddings using a HuggingFace instruct model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text. |
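A small standalone illustration of the input shape built above (the instruction string and texts are assumptions, not the library's defaults): each text is paired with the embed instruction before being handed to the INSTRUCTOR-style encoder.

```python
# Assumed instruction and texts; shows only the [instruction, text] pairs
# that embed_documents passes to client.encode(...).
embed_instruction = "Represent the document for retrieval: "
texts = [
    "LangChain composes LLM calls into chains.",
    "Vector stores index embeddings for similarity search.",
]

instruction_pairs = [[embed_instruction, text] for text in texts]
for pair in instruction_pairs:
    print(pair)
# client.encode(instruction_pairs) would then return one vector per pair.
```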
on_chain_start | self.on_chain_start_common() | def on_chain_start(self, *args: Any, **kwargs: Any) ->Any:
self.on_chain_start_common() | null |
_on_chain_error | """Process the Chain Run upon error."""
self._process_end_trace(run) | def _on_chain_error(self, run: 'Run') ->None:
"""Process the Chain Run upon error."""
self._process_end_trace(run) | Process the Chain Run upon error. |
_convert_message_to_dict | if isinstance(message, SystemMessage):
message_dict = {'role': 'system', 'content': message.content}
elif isinstance(message, HumanMessage):
message_dict = {'role': 'user', 'content': message.content}
elif isinstance(message, AIMessage):
message_dict = {'role': 'assistant', 'content': message.content}
elif isinstance(message, FunctionMessage):
message_dict = {'role': 'function', 'content': message.content}
else:
raise ValueError(f'Got unknown type {message}')
return message_dict | def _convert_message_to_dict(message: BaseMessage) ->dict:
if isinstance(message, SystemMessage):
message_dict = {'role': 'system', 'content': message.content}
elif isinstance(message, HumanMessage):
message_dict = {'role': 'user', 'content': message.content}
elif isinstance(message, AIMessage):
message_dict = {'role': 'assistant', 'content': message.content}
elif isinstance(message, FunctionMessage):
message_dict = {'role': 'function', 'content': message.content}
else:
raise ValueError(f'Got unknown type {message}')
return message_dict | null |
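A minimal usage sketch, assuming `langchain_core` is installed: the same role mapping applied to a short conversation, producing the role/content dicts most chat completion APIs expect.

```python
# Sketch assuming langchain_core is available; mirrors the mapping above.
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

conversation = [
    SystemMessage(content="You are a terse assistant."),
    HumanMessage(content="Capital of France?"),
    AIMessage(content="Paris."),
]
roles = {SystemMessage: "system", HumanMessage: "user", AIMessage: "assistant"}

print([{"role": roles[type(m)], "content": m.content} for m in conversation])
```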
process_doc | try:
import docx2txt
except ImportError:
raise ImportError(
'`docx2txt` package not found, please run `pip install docx2txt`')
response = self.confluence.request(path=link, absolute=True)
text = ''
if response.status_code != 200 or response.content == b'' or response.content is None:
return text
file_data = BytesIO(response.content)
return docx2txt.process(file_data) | def process_doc(self, link: str) ->str:
try:
import docx2txt
except ImportError:
raise ImportError(
'`docx2txt` package not found, please run `pip install docx2txt`')
response = self.confluence.request(path=link, absolute=True)
text = ''
if (response.status_code != 200 or response.content == b'' or response.
content is None):
return text
file_data = BytesIO(response.content)
return docx2txt.process(file_data) | null |
_get_relevant_documents | body = self.body.copy()
body['query'] = query
return self._query(body) | def _get_relevant_documents(self, query: str, *, run_manager:
CallbackManagerForRetrieverRun) ->List[Document]:
body = self.body.copy()
body['query'] = query
return self._query(body) | null |
is_lc_serializable | """Return whether or not the class is serializable."""
return True | @classmethod
def is_lc_serializable(cls) ->bool:
"""Return whether or not the class is serializable."""
return True | Return whether or not the class is serializable. |
output_keys | """Output keys."""
if self.return_intermediate_steps:
return ['output', 'critiques_and_revisions', 'initial_output']
return ['output'] | @property
def output_keys(self) ->List[str]:
"""Output keys."""
if self.return_intermediate_steps:
return ['output', 'critiques_and_revisions', 'initial_output']
return ['output'] | Output keys. |
from_texts | """
Creates an instance of the VST class from the given texts.
Args:
texts (List[str]): List of texts to be added.
embedding (Embeddings): Embedding model for the texts.
metadatas (List[dict], optional):
List of metadata dictionaries for each text. Defaults to None.
table_name (str): Name of the table. Defaults to "test".
database_name (str): Name of the database. Defaults to "default".
connection_args (dict[str, Any]): Connection parameters.
Defaults to DEFAULT_HIPPO_CONNECTION.
index_params (dict): Indexing parameters. Defaults to None.
search_params (dict): Search parameters. Defaults to an empty dictionary.
drop_old (bool): Whether to drop the old collection. Defaults to False.
kwargs: Other arguments.
Returns:
Hippo: An instance of the VST class.
"""
if search_params is None:
search_params = {}
logger.info('00 [from_texts] init the class of Hippo')
vector_db = cls(embedding_function=embedding, table_name=table_name,
database_name=database_name, connection_args=connection_args,
index_params=index_params, drop_old=drop_old, **kwargs)
logger.debug(f'[from_texts] texts:{texts}')
logger.debug(f'[from_texts] metadatas:{metadatas}')
vector_db.add_texts(texts=texts, metadatas=metadatas)
return vector_db | @classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
Optional[List[dict]]=None, table_name: str='test', database_name: str=
'default', connection_args: Dict[str, Any]=DEFAULT_HIPPO_CONNECTION,
index_params: Optional[Dict[Any, Any]]=None, search_params: Optional[
Dict[str, Any]]=None, drop_old: bool=False, **kwargs: Any) ->'Hippo':
"""
Creates an instance of the VST class from the given texts.
Args:
texts (List[str]): List of texts to be added.
embedding (Embeddings): Embedding model for the texts.
metadatas (List[dict], optional):
List of metadata dictionaries for each text. Defaults to None.
table_name (str): Name of the table. Defaults to "test".
database_name (str): Name of the database. Defaults to "default".
connection_args (dict[str, Any]): Connection parameters.
Defaults to DEFAULT_HIPPO_CONNECTION.
index_params (dict): Indexing parameters. Defaults to None.
search_params (dict): Search parameters. Defaults to an empty dictionary.
drop_old (bool): Whether to drop the old collection. Defaults to False.
kwargs: Other arguments.
Returns:
Hippo: An instance of the VST class.
"""
if search_params is None:
search_params = {}
logger.info('00 [from_texts] init the class of Hippo')
vector_db = cls(embedding_function=embedding, table_name=table_name,
database_name=database_name, connection_args=connection_args,
index_params=index_params, drop_old=drop_old, **kwargs)
logger.debug(f'[from_texts] texts:{texts}')
logger.debug(f'[from_texts] metadatas:{metadatas}')
vector_db.add_texts(texts=texts, metadatas=metadatas)
return vector_db | Creates an instance of the VST class from the given texts.
Args:
texts (List[str]): List of texts to be added.
embedding (Embeddings): Embedding model for the texts.
metadatas (List[dict], optional):
List of metadata dictionaries for each text. Defaults to None.
table_name (str): Name of the table. Defaults to "test".
database_name (str): Name of the database. Defaults to "default".
connection_args (dict[str, Any]): Connection parameters.
Defaults to DEFAULT_HIPPO_CONNECTION.
index_params (dict): Indexing parameters. Defaults to None.
search_params (dict): Search parameters. Defaults to an empty dictionary.
drop_old (bool): Whether to drop the old collection. Defaults to False.
kwargs: Other arguments.
Returns:
Hippo: An instance of the VST class. |
test_singlestoredb_from_documents | """Test from_documents constructor."""
table_name = 'test_singlestoredb_from_documents'
drop(table_name)
docs = [Document(page_content=t, metadata={'a': 'b'}) for t in texts]
docsearch = SingleStoreDB.from_documents(docs, NormilizedFakeEmbeddings(),
table_name=table_name, host=TEST_SINGLESTOREDB_URL)
output = docsearch.similarity_search('foo', k=1)
assert output == TEST_SINGLE_WITH_METADATA_RESULT
drop(table_name) | @pytest.mark.skipif(not singlestoredb_installed, reason=
'singlestoredb not installed')
def test_singlestoredb_from_documents(texts: List[str]) ->None:
"""Test from_documents constructor."""
table_name = 'test_singlestoredb_from_documents'
drop(table_name)
docs = [Document(page_content=t, metadata={'a': 'b'}) for t in texts]
docsearch = SingleStoreDB.from_documents(docs, NormilizedFakeEmbeddings
(), table_name=table_name, host=TEST_SINGLESTOREDB_URL)
output = docsearch.similarity_search('foo', k=1)
assert output == TEST_SINGLE_WITH_METADATA_RESULT
drop(table_name) | Test from_documents constructor. |
_stream | provider = self._get_provider()
prompt = ChatPromptAdapter.convert_messages_to_prompt(provider=provider,
messages=messages)
for chunk in self._prepare_input_and_invoke_stream(prompt=prompt, stop=stop,
run_manager=run_manager, **kwargs):
delta = chunk.text
yield ChatGenerationChunk(message=AIMessageChunk(content=delta)) | def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->Iterator[ChatGenerationChunk]:
provider = self._get_provider()
prompt = ChatPromptAdapter.convert_messages_to_prompt(provider=provider,
messages=messages)
for chunk in self._prepare_input_and_invoke_stream(prompt=prompt, stop=
stop, run_manager=run_manager, **kwargs):
delta = chunk.text
yield ChatGenerationChunk(message=AIMessageChunk(content=delta)) | null |
add_texts | ids = ids or [str(uuid.uuid4()) for _ in texts]
docs = self._texts_to_documents(texts, metadatas)
vectors = self._embedding.embed_documents(list(texts))
return self.add_vectors(vectors, docs, ids) | def add_texts(self, texts: Iterable[str], metadatas: Optional[List[Dict[Any,
Any]]]=None, ids: Optional[List[str]]=None, **kwargs: Any) ->List[str]:
ids = ids or [str(uuid.uuid4()) for _ in texts]
docs = self._texts_to_documents(texts, metadatas)
vectors = self._embedding.embed_documents(list(texts))
return self.add_vectors(vectors, docs, ids) | null |
get_task_attribute | """
Get an attribute of a specified task.
"""
task = self.get_task(query, fault_tolerant=True)
params, error = load_query(query, fault_tolerant=True)
if not isinstance(params, dict):
return {'Error': error}
if params['attribute_name'] not in task:
return {'Error':
f"""attribute_name = {params['attribute_name']} was not
found in task keys {task.keys()}. Please call again with one of the key names."""
}
return {params['attribute_name']: task[params['attribute_name']]} | def get_task_attribute(self, query: str) ->Dict:
"""
Get an attribute of a specified task.
"""
task = self.get_task(query, fault_tolerant=True)
params, error = load_query(query, fault_tolerant=True)
if not isinstance(params, dict):
return {'Error': error}
if params['attribute_name'] not in task:
return {'Error':
f"""attribute_name = {params['attribute_name']} was not
found in task keys {task.keys()}. Please call again with one of the key names."""
}
return {params['attribute_name']: task[params['attribute_name']]} | Get an attribute of a specified task. |
_call | _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs[self.input_key]
output = self.question_to_checked_assertions_chain({'question': question},
callbacks=_run_manager.get_child())
return {self.output_key: output['revised_statement']} | def _call(self, inputs: Dict[str, Any], run_manager: Optional[
CallbackManagerForChainRun]=None) ->Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs[self.input_key]
output = self.question_to_checked_assertions_chain({'question':
question}, callbacks=_run_manager.get_child())
return {self.output_key: output['revised_statement']} | null |
_transform | from langchain_core.beta.runnables.context import config_with_context
steps = [self.first] + self.middle + [self.last]
config = config_with_context(config, self.steps)
final_pipeline = cast(Iterator[Output], input)
for step in steps:
final_pipeline = step.transform(final_pipeline, patch_config(config,
callbacks=run_manager.get_child(f'seq:step:{steps.index(step) + 1}')))
for output in final_pipeline:
yield output | def _transform(self, input: Iterator[Input], run_manager:
CallbackManagerForChainRun, config: RunnableConfig) ->Iterator[Output]:
from langchain_core.beta.runnables.context import config_with_context
steps = [self.first] + self.middle + [self.last]
config = config_with_context(config, self.steps)
final_pipeline = cast(Iterator[Output], input)
for step in steps:
final_pipeline = step.transform(final_pipeline, patch_config(config,
callbacks=run_manager.get_child(
f'seq:step:{steps.index(step) + 1}')))
for output in final_pipeline:
yield output | null |
format_black | """Format a file with black."""
subprocess.run(f'black {filepath}', stderr=subprocess.STDOUT, text=True,
shell=True, timeout=3, check=False) | def format_black(filepath: str):
"""Format a file with black."""
subprocess.run(f'black {filepath}', stderr=subprocess.STDOUT, text=True,
shell=True, timeout=3, check=False) | Format a file with black. |
test_docugami_loader_local | """Test DocugamiLoader."""
loader = DocugamiLoader(file_paths=[DOCUGAMI_XML_PATH])
docs = loader.load()
assert len(docs) == 25
assert '/docset:DisclosingParty' in docs[1].metadata['xpath']
assert 'h1' in docs[1].metadata['structure']
assert 'DisclosingParty' in docs[1].metadata['tag']
assert docs[1].page_content.startswith('Disclosing') | @pytest.mark.requires('dgml_utils')
def test_docugami_loader_local() ->None:
"""Test DocugamiLoader."""
loader = DocugamiLoader(file_paths=[DOCUGAMI_XML_PATH])
docs = loader.load()
assert len(docs) == 25
assert '/docset:DisclosingParty' in docs[1].metadata['xpath']
assert 'h1' in docs[1].metadata['structure']
assert 'DisclosingParty' in docs[1].metadata['tag']
assert docs[1].page_content.startswith('Disclosing') | Test DocugamiLoader. |
test_visit_structured_query_no_filter | query = 'What is the capital of France?'
structured_query = StructuredQuery(query=query, filter=None)
expected: Tuple[str, Dict] = (query, {})
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual | def test_visit_structured_query_no_filter() ->None:
query = 'What is the capital of France?'
structured_query = StructuredQuery(query=query, filter=None)
expected: Tuple[str, Dict] = (query, {})
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual | null |
_type | """Return the type key."""
return 'combining' | @property
def _type(self) ->str:
"""Return the type key."""
return 'combining' | Return the type key. |
_evaluate_expression | try:
import sympy
except ImportError as e:
raise ImportError(
'Unable to import sympy, please install it with `pip install sympy`.'
) from e
try:
output = str(sympy.sympify(expression, evaluate=True))
except Exception as e:
raise ValueError(
f'LLMSymbolicMathChain._evaluate("{expression}") raised error: {e}. Please try again with a valid numerical expression'
)
return re.sub('^\\[|\\]$', '', output) | def _evaluate_expression(self, expression: str) ->str:
try:
import sympy
except ImportError as e:
raise ImportError(
'Unable to import sympy, please install it with `pip install sympy`.'
) from e
try:
output = str(sympy.sympify(expression, evaluate=True))
except Exception as e:
raise ValueError(
f'LLMSymbolicMathChain._evaluate("{expression}") raised error: {e}. Please try again with a valid numerical expression'
)
return re.sub('^\\[|\\]$', '', output) | null |
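A standalone sketch of the same evaluation step, assuming sympy is installed (the expression is an arbitrary example): `sympify` evaluates the expression and the regex strips any surrounding brackets from the rendered result.

```python
# Assumed example expression; reproduces the sympify-then-strip step above.
import re
import sympy

expression = "sqrt(16) + 2**3"
output = str(sympy.sympify(expression, evaluate=True))
print(re.sub(r"^\[|\]$", "", output))  # -> 12
```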
test_xata_chat_memory | message_history = XataChatMessageHistory(api_key=os.getenv('XATA_API_KEY',
''), db_url=os.getenv('XATA_DB_URL', ''), session_id=
'integration-test-session')
memory = ConversationBufferMemory(memory_key='baz', chat_memory=
message_history, return_messages=True)
memory.chat_memory.add_ai_message('This is me, the AI')
memory.chat_memory.add_user_message('This is me, the human')
messages = memory.chat_memory.messages
messages_json = json.dumps([message_to_dict(msg) for msg in messages])
assert 'This is me, the AI' in messages_json
assert 'This is me, the human' in messages_json
memory.chat_memory.clear() | def test_xata_chat_memory(self) ->None:
message_history = XataChatMessageHistory(api_key=os.getenv(
'XATA_API_KEY', ''), db_url=os.getenv('XATA_DB_URL', ''),
session_id='integration-test-session')
memory = ConversationBufferMemory(memory_key='baz', chat_memory=
message_history, return_messages=True)
memory.chat_memory.add_ai_message('This is me, the AI')
memory.chat_memory.add_user_message('This is me, the human')
messages = memory.chat_memory.messages
messages_json = json.dumps([message_to_dict(msg) for msg in messages])
assert 'This is me, the AI' in messages_json
assert 'This is me, the human' in messages_json
memory.chat_memory.clear() | null |
test_python_ast_repl_tool_single_input | """Test that the python REPL tool works with a single input."""
tool = PythonAstREPLTool()
assert tool.is_single_input
assert tool.run('1 + 1') == 2 | @pytest.mark.skipif(sys.version_info < (3, 9), reason=
'Requires python version >= 3.9 to run.')
def test_python_ast_repl_tool_single_input() ->None:
"""Test that the python REPL tool works with a single input."""
tool = PythonAstREPLTool()
assert tool.is_single_input
assert tool.run('1 + 1') == 2 | Test that the python REPL tool works with a single input. |
_load_single_chat_session | results: List[Union[AIMessage, HumanMessage]] = []
previous_sender = None
for message in messages:
if not isinstance(message, dict):
continue
text = message.get('text', '')
timestamp = message.get('ts', '')
sender = message.get('user', '')
if not sender:
continue
skip_pattern = re.compile('<@U\\d+> has joined the channel', flags=re.
IGNORECASE)
if skip_pattern.match(text):
continue
if sender == previous_sender:
results[-1].content += '\n\n' + text
results[-1].additional_kwargs['events'].append({'message_time':
timestamp})
else:
results.append(HumanMessage(role=sender, content=text,
additional_kwargs={'sender': sender, 'events': [{'message_time':
timestamp}]}))
previous_sender = sender
return ChatSession(messages=results) | def _load_single_chat_session(self, messages: List[Dict]) ->ChatSession:
results: List[Union[AIMessage, HumanMessage]] = []
previous_sender = None
for message in messages:
if not isinstance(message, dict):
continue
text = message.get('text', '')
timestamp = message.get('ts', '')
sender = message.get('user', '')
if not sender:
continue
skip_pattern = re.compile('<@U\\d+> has joined the channel', flags=
re.IGNORECASE)
if skip_pattern.match(text):
continue
if sender == previous_sender:
results[-1].content += '\n\n' + text
results[-1].additional_kwargs['events'].append({'message_time':
timestamp})
else:
results.append(HumanMessage(role=sender, content=text,
additional_kwargs={'sender': sender, 'events': [{
'message_time': timestamp}]}))
previous_sender = sender
return ChatSession(messages=results) | null |
test_numbered_list | parser = NumberedListOutputParser()
text1 = """Your response should be a numbered list with each item on a new line. For example:
1. foo
2. bar
3. baz"""
text2 = """Items:
1. apple
2. banana
3. cherry"""
text3 = 'No items in the list.'
for text, expected in [(text1, ['foo', 'bar', 'baz']), (text2, ['apple',
'banana', 'cherry']), (text3, [])]:
expectedlist = [[a] for a in cast(List[str], expected)]
assert parser.parse(text) == expected
assert add(parser.transform(t for t in text)) == (expected or None)
assert list(parser.transform(t for t in text)) == expectedlist
assert list(parser.transform(t for t in text.splitlines(keepends=True))
) == expectedlist
assert list(parser.transform(' ' + t if i > 0 else t for i, t in
enumerate(text.split(' ')))) == expectedlist
assert list(parser.transform(iter([text]))) == expectedlist | def test_numbered_list() ->None:
parser = NumberedListOutputParser()
text1 = """Your response should be a numbered list with each item on a new line. For example:
1. foo
2. bar
3. baz"""
text2 = 'Items:\n\n1. apple\n\n 2. banana\n\n3. cherry'
text3 = 'No items in the list.'
for text, expected in [(text1, ['foo', 'bar', 'baz']), (text2, ['apple',
'banana', 'cherry']), (text3, [])]:
expectedlist = [[a] for a in cast(List[str], expected)]
assert parser.parse(text) == expected
assert add(parser.transform(t for t in text)) == (expected or None)
assert list(parser.transform(t for t in text)) == expectedlist
assert list(parser.transform(t for t in text.splitlines(keepends=True))
) == expectedlist
assert list(parser.transform(' ' + t if i > 0 else t for i, t in
enumerate(text.split(' ')))) == expectedlist
assert list(parser.transform(iter([text]))) == expectedlist | null |
_do_generation | callbacks = _run_manager.get_child()
docs = []
for question in questions:
docs.extend(self.retriever.get_relevant_documents(question))
context = '\n\n'.join(d.page_content for d in docs)
result = self.response_chain.predict(user_input=user_input, context=context,
response=response, callbacks=callbacks)
marginal, finished = self.output_parser.parse(result)
return marginal, finished | def _do_generation(self, questions: List[str], user_input: str, response:
str, _run_manager: CallbackManagerForChainRun) ->Tuple[str, bool]:
callbacks = _run_manager.get_child()
docs = []
for question in questions:
docs.extend(self.retriever.get_relevant_documents(question))
context = '\n\n'.join(d.page_content for d in docs)
result = self.response_chain.predict(user_input=user_input, context=
context, response=response, callbacks=callbacks)
marginal, finished = self.output_parser.parse(result)
return marginal, finished | null |
evaluation_name | return 'json_validity' | @property
def evaluation_name(self) ->str:
return 'json_validity' | null |
load | return list(self.lazy_load()) | def load(self) ->List[Document]:
return list(self.lazy_load()) | null |
toy_dir | """Yield a pre-populated directory to test the blob loader."""
with tempfile.TemporaryDirectory() as temp_dir:
with open(os.path.join(temp_dir, 'test.txt'), 'w') as test_txt:
test_txt.write('This is a test.txt file.')
with open(os.path.join(temp_dir, 'test.html'), 'w') as test_html:
test_html.write(
'<html><body><h1>This is a test.html file.</h1></body></html>')
with open(os.path.join(temp_dir, '.hidden_file'), 'w') as hidden_file:
hidden_file.write('This is a hidden file.')
some_dir = os.path.join(temp_dir, 'some_dir')
os.makedirs(some_dir)
with open(os.path.join(some_dir, 'nested_file.txt'), 'w') as nested_file:
nested_file.write('This is a nested_file.txt file.')
other_dir = os.path.join(some_dir, 'other_dir')
os.makedirs(other_dir)
with open(os.path.join(other_dir, 'more_nested.txt'), 'w') as nested_file:
nested_file.write('This is a more_nested.txt file.')
yield Path(temp_dir) | @pytest.fixture
def toy_dir() ->Generator[Path, None, None]:
"""Yield a pre-populated directory to test the blob loader."""
with tempfile.TemporaryDirectory() as temp_dir:
with open(os.path.join(temp_dir, 'test.txt'), 'w') as test_txt:
test_txt.write('This is a test.txt file.')
with open(os.path.join(temp_dir, 'test.html'), 'w') as test_html:
test_html.write(
'<html><body><h1>This is a test.html file.</h1></body></html>')
with open(os.path.join(temp_dir, '.hidden_file'), 'w') as hidden_file:
hidden_file.write('This is a hidden file.')
some_dir = os.path.join(temp_dir, 'some_dir')
os.makedirs(some_dir)
with open(os.path.join(some_dir, 'nested_file.txt'), 'w'
) as nested_file:
nested_file.write('This is a nested_file.txt file.')
other_dir = os.path.join(some_dir, 'other_dir')
os.makedirs(other_dir)
with open(os.path.join(other_dir, 'more_nested.txt'), 'w'
) as nested_file:
nested_file.write('This is a more_nested.txt file.')
yield Path(temp_dir) | Yield a pre-populated directory to test the blob loader. |
is_lc_serializable | """Return whether this model can be serialized by Langchain."""
return True | @classmethod
def is_lc_serializable(cls) ->bool:
"""Return whether this model can be serialized by Langchain."""
return True | Return whether this model can be serialized by Langchain. |
test_vald_add_texts | texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 3
texts = ['a', 'b', 'c']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch.add_texts(texts, metadatas)
time.sleep(WAIT_TIME)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 6 | def test_vald_add_texts() ->None:
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 3
texts = ['a', 'b', 'c']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch.add_texts(texts, metadatas)
time.sleep(WAIT_TIME)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 6 | null |
from_llm | """Load QA Eval Chain from LLM.
Args:
llm (BaseLanguageModel): the base language model to use.
prompt (PromptTemplate): A prompt template containing the input_variables:
'query', 'context' and 'result' that will be used as the prompt
for evaluation.
Defaults to PROMPT.
**kwargs: additional keyword arguments.
Returns:
ContextQAEvalChain: the loaded QA eval chain.
"""
prompt = prompt or CONTEXT_PROMPT
cls._validate_input_vars(prompt)
return cls(llm=llm, prompt=prompt, **kwargs) | @classmethod
def from_llm(cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate]=
None, **kwargs: Any) ->ContextQAEvalChain:
"""Load QA Eval Chain from LLM.
Args:
llm (BaseLanguageModel): the base language model to use.
prompt (PromptTemplate): A prompt template containing the input_variables:
'query', 'context' and 'result' that will be used as the prompt
for evaluation.
Defaults to PROMPT.
**kwargs: additional keyword arguments.
Returns:
ContextQAEvalChain: the loaded QA eval chain.
"""
prompt = prompt or CONTEXT_PROMPT
cls._validate_input_vars(prompt)
return cls(llm=llm, prompt=prompt, **kwargs) | Load QA Eval Chain from LLM.
Args:
llm (BaseLanguageModel): the base language model to use.
prompt (PromptTemplate): A prompt template containing the input_variables:
'query', 'context' and 'result' that will be used as the prompt
for evaluation.
Defaults to PROMPT.
**kwargs: additional keyword arguments.
Returns:
ContextQAEvalChain: the loaded QA eval chain. |
clear | """Clear session memory from SingleStoreDB"""
self._create_table_if_not_exists()
conn = self.connection_pool.connect()
try:
cur = conn.cursor()
try:
cur.execute('DELETE FROM {} WHERE {} = %s'.format(self.table_name,
self.session_id_field), self.session_id)
finally:
cur.close()
finally:
conn.close() | def clear(self) ->None:
"""Clear session memory from SingleStoreDB"""
self._create_table_if_not_exists()
conn = self.connection_pool.connect()
try:
cur = conn.cursor()
try:
cur.execute('DELETE FROM {} WHERE {} = %s'.format(self.
table_name, self.session_id_field), self.session_id)
finally:
cur.close()
finally:
conn.close() | Clear session memory from SingleStoreDB |
test_on_llm_end | response = LLMResult(generations=[], llm_output={'token_usage': {
'prompt_tokens': 2, 'completion_tokens': 1, 'total_tokens': 3},
'model_name': BaseOpenAI.__fields__['model_name'].default})
handler.on_llm_end(response)
assert handler.successful_requests == 1
assert handler.total_tokens == 3
assert handler.prompt_tokens == 2
assert handler.completion_tokens == 1
assert handler.total_cost > 0 | def test_on_llm_end(handler: OpenAICallbackHandler) ->None:
response = LLMResult(generations=[], llm_output={'token_usage': {
'prompt_tokens': 2, 'completion_tokens': 1, 'total_tokens': 3},
'model_name': BaseOpenAI.__fields__['model_name'].default})
handler.on_llm_end(response)
assert handler.successful_requests == 1
assert handler.total_tokens == 3
assert handler.prompt_tokens == 2
assert handler.completion_tokens == 1
assert handler.total_cost > 0 | null |
_import_astradb | from langchain_community.vectorstores.astradb import AstraDB
return AstraDB | def _import_astradb() ->Any:
from langchain_community.vectorstores.astradb import AstraDB
return AstraDB | null |
__init__ | """Initializes private fields."""
try:
from google.cloud.discoveryengine_v1beta import SearchServiceClient
except ImportError as exc:
raise ImportError(
'google.cloud.discoveryengine is not installed. Please install it with pip install google-cloud-discoveryengine'
) from exc
super().__init__(**kwargs)
self._client = SearchServiceClient(credentials=self.credentials,
client_options=self.client_options, client_info=get_client_info(module=
'vertex-ai-search'))
self._serving_config = self._client.serving_config_path(project=self.
project_id, location=self.location_id, data_store=self.data_store_id,
serving_config=self.serving_config_id) | def __init__(self, **kwargs: Any) ->None:
"""Initializes private fields."""
try:
from google.cloud.discoveryengine_v1beta import SearchServiceClient
except ImportError as exc:
raise ImportError(
'google.cloud.discoveryengine is not installed. Please install it with pip install google-cloud-discoveryengine'
) from exc
super().__init__(**kwargs)
self._client = SearchServiceClient(credentials=self.credentials,
client_options=self.client_options, client_info=get_client_info(
module='vertex-ai-search'))
self._serving_config = self._client.serving_config_path(project=self.
project_id, location=self.location_id, data_store=self.
data_store_id, serving_config=self.serving_config_id) | Initializes private fields. |
_completion_with_retry | generation_config = kwargs.get('generation_config', {})
if is_gemini:
return llm.client.generate_content(contents=prompt, stream=stream,
generation_config=generation_config)
return llm.client.generate_text(prompt=prompt, **kwargs) | @retry_decorator
def _completion_with_retry(prompt: LanguageModelInput, is_gemini: bool,
stream: bool, **kwargs: Any) ->Any:
generation_config = kwargs.get('generation_config', {})
if is_gemini:
return llm.client.generate_content(contents=prompt, stream=stream,
generation_config=generation_config)
return llm.client.generate_text(prompt=prompt, **kwargs) | null |
embed_query | """Call out to Infinity's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0] | def embed_query(self, text: str) ->List[float]:
"""Call out to Infinity's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0] | Call out to Infinity's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text. |
test_openai_stop_valid | """Test openai stop logic on valid configuration."""
query = 'write an ordered list of five items'
first_llm = OpenAI(stop='3', temperature=0)
first_output = first_llm(query)
second_llm = OpenAI(temperature=0)
second_output = second_llm(query, stop=['3'])
assert first_output == second_output | def test_openai_stop_valid() ->None:
"""Test openai stop logic on valid configuration."""
query = 'write an ordered list of five items'
first_llm = OpenAI(stop='3', temperature=0)
first_output = first_llm(query)
second_llm = OpenAI(temperature=0)
second_output = second_llm(query, stop=['3'])
assert first_output == second_output | Test openai stop logic on valid configuration. |
test_from_texts | from momento import CredentialProvider, VectorIndexConfigurations
random_text = random_string()
random_document = f'Hello world {random_text} goodbye world!'
texts.insert(0, random_document)
vector_store = None
try:
vector_store = MomentoVectorIndex.from_texts(texts=texts, embedding=
embedding_openai, index_name=random_index_name, configuration=
VectorIndexConfigurations.Default.latest(), credential_provider=
CredentialProvider.from_environment_variable('MOMENTO_API_KEY'))
wait()
documents = vector_store.similarity_search(query=random_text, k=1)
assert documents == [Document(page_content=random_document)]
finally:
if vector_store is not None:
vector_store._client.delete_index(random_index_name) | def test_from_texts(random_index_name: str, embedding_openai:
OpenAIEmbeddings, texts: List[str]) ->None:
from momento import CredentialProvider, VectorIndexConfigurations
random_text = random_string()
random_document = f'Hello world {random_text} goodbye world!'
texts.insert(0, random_document)
vector_store = None
try:
vector_store = MomentoVectorIndex.from_texts(texts=texts, embedding
=embedding_openai, index_name=random_index_name, configuration=
VectorIndexConfigurations.Default.latest(), credential_provider
=CredentialProvider.from_environment_variable('MOMENTO_API_KEY'))
wait()
documents = vector_store.similarity_search(query=random_text, k=1)
assert documents == [Document(page_content=random_document)]
finally:
if vector_store is not None:
vector_store._client.delete_index(random_index_name) | null |
test_missing_basedOn_from_throws | llm, PROMPT = setup()
chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=
False, model=MockEncoder()))
actions = ['0', '1', '2']
with pytest.raises(ValueError):
chain.run(action=rl_chain.ToSelectFrom(actions)) | @pytest.mark.requires('vowpal_wabbit_next', 'sentence_transformers')
def test_missing_basedOn_from_throws() ->None:
llm, PROMPT = setup()
chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed
=False, model=MockEncoder()))
actions = ['0', '1', '2']
with pytest.raises(ValueError):
chain.run(action=rl_chain.ToSelectFrom(actions)) | null |
on_agent_action | """Run on agent action."""
print_text(action.log, color=color or self.color, file=self.file) | def on_agent_action(self, action: AgentAction, color: Optional[str]=None,
**kwargs: Any) ->Any:
"""Run on agent action."""
print_text(action.log, color=color or self.color, file=self.file) | Run on agent action. |
reduce_endpoint_docs | out = {}
if docs.get('description'):
out['description'] = docs.get('description')
if docs.get('parameters'):
out['parameters'] = [parameter for parameter in docs.get('parameters',
[]) if parameter.get('required')]
if '200' in docs['responses']:
out['responses'] = docs['responses']['200']
if docs.get('requestBody'):
out['requestBody'] = docs.get('requestBody')
return out | def reduce_endpoint_docs(docs: dict) ->dict:
out = {}
if docs.get('description'):
out['description'] = docs.get('description')
if docs.get('parameters'):
out['parameters'] = [parameter for parameter in docs.get(
'parameters', []) if parameter.get('required')]
if '200' in docs['responses']:
out['responses'] = docs['responses']['200']
if docs.get('requestBody'):
out['requestBody'] = docs.get('requestBody')
return out | null |
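A standalone illustration on an invented endpoint description: only the description, the required parameters, the 200 response, and any request body survive the reduction.

```python
# Toy OpenAPI endpoint docs (invented); applies the same filtering rules.
docs = {
    "description": "List pets in the store.",
    "parameters": [
        {"name": "limit", "required": False},
        {"name": "species", "required": True},
    ],
    "responses": {
        "200": {"description": "A list of pets."},
        "404": {"description": "Not found."},
    },
}

out = {}
if docs.get("description"):
    out["description"] = docs["description"]
if docs.get("parameters"):
    out["parameters"] = [p for p in docs["parameters"] if p.get("required")]
if "200" in docs["responses"]:
    out["responses"] = docs["responses"]["200"]
if docs.get("requestBody"):
    out["requestBody"] = docs["requestBody"]

print(out)  # keeps only the required parameter and the 200 response
```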
test_custom_caching | """Test custom_caching behavior."""
Base = declarative_base()
class FulltextLLMCache(Base):
"""Postgres table for fulltext-indexed LLM Cache."""
__tablename__ = 'llm_cache_fulltext'
id = Column(Integer, Sequence('cache_id'), primary_key=True)
prompt = Column(String, nullable=False)
llm = Column(String, nullable=False)
idx = Column(Integer)
response = Column(String)
engine = create_engine('sqlite://')
set_llm_cache(SQLAlchemyCache(engine, FulltextLLMCache))
llm = FakeLLM()
params = llm.dict()
params['stop'] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update('foo', llm_string, [Generation(text='fizz')])
output = llm.generate(['foo', 'bar', 'foo'])
expected_cache_output = [Generation(text='foo')]
cache_output = get_llm_cache().lookup('bar', llm_string)
assert cache_output == expected_cache_output
set_llm_cache(None)
expected_generations = [[Generation(text='fizz')], [Generation(text='foo')],
[Generation(text='fizz')]]
expected_output = LLMResult(generations=expected_generations, llm_output=None)
assert output == expected_output | def test_custom_caching() ->None:
"""Test custom_caching behavior."""
Base = declarative_base()
class FulltextLLMCache(Base):
"""Postgres table for fulltext-indexed LLM Cache."""
__tablename__ = 'llm_cache_fulltext'
id = Column(Integer, Sequence('cache_id'), primary_key=True)
prompt = Column(String, nullable=False)
llm = Column(String, nullable=False)
idx = Column(Integer)
response = Column(String)
engine = create_engine('sqlite://')
set_llm_cache(SQLAlchemyCache(engine, FulltextLLMCache))
llm = FakeLLM()
params = llm.dict()
params['stop'] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update('foo', llm_string, [Generation(text='fizz')])
output = llm.generate(['foo', 'bar', 'foo'])
expected_cache_output = [Generation(text='foo')]
cache_output = get_llm_cache().lookup('bar', llm_string)
assert cache_output == expected_cache_output
set_llm_cache(None)
expected_generations = [[Generation(text='fizz')], [Generation(text=
'foo')], [Generation(text='fizz')]]
expected_output = LLMResult(generations=expected_generations,
llm_output=None)
assert output == expected_output | Test custom_caching behavior. |
_get_default_text_splitter | return RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0) | def _get_default_text_splitter() ->TextSplitter:
return RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0) | null |
_create_chat_result | generations = []
for res in response.choices:
finish_reason = getattr(res, 'finish_reason')
if finish_reason:
finish_reason = finish_reason.value
gen = ChatGeneration(message=_convert_mistral_chat_message_to_message(
res.message), generation_info={'finish_reason': finish_reason})
generations.append(gen)
token_usage = getattr(response, 'usage')
token_usage = vars(token_usage) if token_usage else {}
llm_output = {'token_usage': token_usage, 'model': self.model}
return ChatResult(generations=generations, llm_output=llm_output) | def _create_chat_result(self, response: MistralChatCompletionResponse
) ->ChatResult:
generations = []
for res in response.choices:
finish_reason = getattr(res, 'finish_reason')
if finish_reason:
finish_reason = finish_reason.value
gen = ChatGeneration(message=
_convert_mistral_chat_message_to_message(res.message),
generation_info={'finish_reason': finish_reason})
generations.append(gen)
token_usage = getattr(response, 'usage')
token_usage = vars(token_usage) if token_usage else {}
llm_output = {'token_usage': token_usage, 'model': self.model}
return ChatResult(generations=generations, llm_output=llm_output) | null |
initialize_agent | """Load an agent executor given tools and LLM.
Args:
tools: List of tools this agent has access to.
llm: Language model to use as the agent.
agent: Agent type to use. If None and agent_path is also None, will default to
AgentType.ZERO_SHOT_REACT_DESCRIPTION.
callback_manager: CallbackManager to use. Global callback manager is used if
not provided. Defaults to None.
agent_path: Path to serialized agent to use.
agent_kwargs: Additional keyword arguments to pass to the underlying agent
tags: Tags to apply to the traced runs.
**kwargs: Additional keyword arguments passed to the agent executor
Returns:
An agent executor
"""
tags_ = list(tags) if tags else []
if agent is None and agent_path is None:
agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION
if agent is not None and agent_path is not None:
raise ValueError(
'Both `agent` and `agent_path` are specified, but at most only one should be.'
)
if agent is not None:
if agent not in AGENT_TO_CLASS:
raise ValueError(
f'Got unknown agent type: {agent}. Valid types are: {AGENT_TO_CLASS.keys()}.'
)
tags_.append(agent.value if isinstance(agent, AgentType) else agent)
agent_cls = AGENT_TO_CLASS[agent]
agent_kwargs = agent_kwargs or {}
agent_obj = agent_cls.from_llm_and_tools(llm, tools, callback_manager=
callback_manager, **agent_kwargs)
elif agent_path is not None:
agent_obj = load_agent(agent_path, llm=llm, tools=tools,
callback_manager=callback_manager)
try:
tags_.append(agent_obj._agent_type)
except NotImplementedError:
pass
else:
raise ValueError(
'Somehow both `agent` and `agent_path` are None, this should never happen.'
)
return AgentExecutor.from_agent_and_tools(agent=agent_obj, tools=tools,
callback_manager=callback_manager, tags=tags_, **kwargs) | def initialize_agent(tools: Sequence[BaseTool], llm: BaseLanguageModel,
agent: Optional[AgentType]=None, callback_manager: Optional[
BaseCallbackManager]=None, agent_path: Optional[str]=None, agent_kwargs:
Optional[dict]=None, *, tags: Optional[Sequence[str]]=None, **kwargs: Any
) ->AgentExecutor:
"""Load an agent executor given tools and LLM.
Args:
tools: List of tools this agent has access to.
llm: Language model to use as the agent.
agent: Agent type to use. If None and agent_path is also None, will default to
AgentType.ZERO_SHOT_REACT_DESCRIPTION.
callback_manager: CallbackManager to use. Global callback manager is used if
not provided. Defaults to None.
agent_path: Path to serialized agent to use.
agent_kwargs: Additional keyword arguments to pass to the underlying agent
tags: Tags to apply to the traced runs.
**kwargs: Additional keyword arguments passed to the agent executor
Returns:
An agent executor
"""
tags_ = list(tags) if tags else []
if agent is None and agent_path is None:
agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION
if agent is not None and agent_path is not None:
raise ValueError(
'Both `agent` and `agent_path` are specified, but at most only one should be.'
)
if agent is not None:
if agent not in AGENT_TO_CLASS:
raise ValueError(
f'Got unknown agent type: {agent}. Valid types are: {AGENT_TO_CLASS.keys()}.'
)
tags_.append(agent.value if isinstance(agent, AgentType) else agent)
agent_cls = AGENT_TO_CLASS[agent]
agent_kwargs = agent_kwargs or {}
agent_obj = agent_cls.from_llm_and_tools(llm, tools,
callback_manager=callback_manager, **agent_kwargs)
elif agent_path is not None:
agent_obj = load_agent(agent_path, llm=llm, tools=tools,
callback_manager=callback_manager)
try:
tags_.append(agent_obj._agent_type)
except NotImplementedError:
pass
else:
raise ValueError(
'Somehow both `agent` and `agent_path` are None, this should never happen.'
)
return AgentExecutor.from_agent_and_tools(agent=agent_obj, tools=tools,
callback_manager=callback_manager, tags=tags_, **kwargs) | Load an agent executor given tools and LLM.
Args:
tools: List of tools this agent has access to.
llm: Language model to use as the agent.
agent: Agent type to use. If None and agent_path is also None, will default to
AgentType.ZERO_SHOT_REACT_DESCRIPTION.
callback_manager: CallbackManager to use. Global callback manager is used if
not provided. Defaults to None.
agent_path: Path to serialized agent to use.
agent_kwargs: Additional keyword arguments to pass to the underlying agent
tags: Tags to apply to the traced runs.
**kwargs: Additional keyword arguments passed to the agent executor
Returns:
An agent executor |
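A hedged usage sketch of the constructor above. It assumes an OpenAI key in the environment and the community DuckDuckGo search tool; both are illustrative choices, not requirements of `initialize_agent` itself.

```python
# Usage sketch only: needs OPENAI_API_KEY set and the listed packages installed.
from langchain.agents import AgentType, initialize_agent
from langchain_community.llms import OpenAI
from langchain_community.tools import DuckDuckGoSearchRun

llm = OpenAI(temperature=0)
tools = [DuckDuckGoSearchRun()]

agent_executor = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,  # also the default when agent is None
    tags=["demo"],
)
# agent_executor.run("Who wrote 'The Hobbit'?")
```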
_import_alibaba_cloud_open_search_settings | from langchain_community.vectorstores.alibabacloud_opensearch import AlibabaCloudOpenSearchSettings
return AlibabaCloudOpenSearchSettings | def _import_alibaba_cloud_open_search_settings() ->Any:
from langchain_community.vectorstores.alibabacloud_opensearch import AlibabaCloudOpenSearchSettings
return AlibabaCloudOpenSearchSettings | null |
get_parameters_for_path | from openapi_pydantic import Reference
path_item = self._get_path_strict(path)
parameters = []
if not path_item.parameters:
return []
for parameter in path_item.parameters:
if isinstance(parameter, Reference):
parameter = self._get_root_referenced_parameter(parameter)
parameters.append(parameter)
return parameters | def get_parameters_for_path(self, path: str) ->List[Parameter]:
from openapi_pydantic import Reference
path_item = self._get_path_strict(path)
parameters = []
if not path_item.parameters:
return []
for parameter in path_item.parameters:
if isinstance(parameter, Reference):
parameter = self._get_root_referenced_parameter(parameter)
parameters.append(parameter)
return parameters | null |
get_tools | """Get the tools in the toolkit."""
return self.tools | def get_tools(self) ->List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools | Get the tools in the toolkit. |
_similarity_search_with_score | params = {'queryVector': embedding, 'path': self._embedding_key,
'numCandidates': k * 10, 'limit': k, 'index': self._index_name}
if pre_filter:
params['filter'] = pre_filter
query = {'$vectorSearch': params}
pipeline = [query, {'$set': {'score': {'$meta': 'vectorSearchScore'}}}]
if post_filter_pipeline is not None:
pipeline.extend(post_filter_pipeline)
cursor = self._collection.aggregate(pipeline)
docs = []
for res in cursor:
text = res.pop(self._text_key)
score = res.pop('score')
docs.append((Document(page_content=text, metadata=res), score))
return docs | def _similarity_search_with_score(self, embedding: List[float], k: int=4,
pre_filter: Optional[Dict]=None, post_filter_pipeline: Optional[List[
Dict]]=None) ->List[Tuple[Document, float]]:
params = {'queryVector': embedding, 'path': self._embedding_key,
'numCandidates': k * 10, 'limit': k, 'index': self._index_name}
if pre_filter:
params['filter'] = pre_filter
query = {'$vectorSearch': params}
pipeline = [query, {'$set': {'score': {'$meta': 'vectorSearchScore'}}}]
if post_filter_pipeline is not None:
pipeline.extend(post_filter_pipeline)
cursor = self._collection.aggregate(pipeline)
docs = []
for res in cursor:
text = res.pop(self._text_key)
score = res.pop('score')
docs.append((Document(page_content=text, metadata=res), score))
return docs | null |
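A standalone illustration of the aggregation pipeline assembled above, with invented field and index names: a `$vectorSearch` stage followed by a `$set` stage that surfaces the similarity score.

```python
# Invented values for the embedding key and index name; shows only the
# pipeline shape passed to collection.aggregate(...).
embedding = [0.12, -0.03, 0.55]
k = 4

params = {
    "queryVector": embedding,
    "path": "embedding",       # stands in for self._embedding_key
    "numCandidates": k * 10,
    "limit": k,
    "index": "vector_index",   # stands in for self._index_name
}
pipeline = [
    {"$vectorSearch": params},
    {"$set": {"score": {"$meta": "vectorSearchScore"}}},
]
print(pipeline)
```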
_completion_with_retry | return make_request(llm, **_kwargs) | @retry_decorator
def _completion_with_retry(**_kwargs: Any) ->Any:
return make_request(llm, **_kwargs) | null |
completion_with_retry | """Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) ->Any:
return self.client.completion(**kwargs)
return _completion_with_retry(**kwargs) | def completion_with_retry(self, run_manager: Optional[
CallbackManagerForLLMRun]=None, **kwargs: Any) ->Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) ->Any:
return self.client.completion(**kwargs)
return _completion_with_retry(**kwargs) | Use tenacity to retry the completion call. |
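A self-contained sketch of the same pattern with tenacity (the flaky function and retry settings are invented): build a retry decorator, wrap an inner call with it, and invoke the wrapper with the caller's kwargs.

```python
# Invented flaky call; demonstrates the decorator-per-call retry pattern.
from tenacity import retry, stop_after_attempt, wait_fixed

attempts = {"count": 0}

def completion_with_retry(**kwargs):
    retry_decorator = retry(stop=stop_after_attempt(3), wait=wait_fixed(0.01))

    @retry_decorator
    def _completion_with_retry(**inner_kwargs):
        attempts["count"] += 1
        if attempts["count"] < 2:
            raise RuntimeError("transient failure")  # retried by tenacity
        return {"text": "ok", "attempts": attempts["count"], **inner_kwargs}

    return _completion_with_retry(**kwargs)

print(completion_with_retry(prompt="hello"))  # succeeds on the second attempt
```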
_generate | llm_input = self._to_chat_prompt(messages)
llm_result = self.llm._generate(prompts=[llm_input], stop=stop, run_manager
=run_manager, **kwargs)
return self._to_chat_result(llm_result) | def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->ChatResult:
llm_input = self._to_chat_prompt(messages)
llm_result = self.llm._generate(prompts=[llm_input], stop=stop,
run_manager=run_manager, **kwargs)
return self._to_chat_result(llm_result) | null |
__getitem__ | value = dict.__getitem__(self, key)
return attrdict(value) if isinstance(value, dict) else value | def __getitem__(self, key: str) ->Any:
value = dict.__getitem__(self, key)
return attrdict(value) if isinstance(value, dict) else value | null |
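A self-contained sketch of the attrdict idea; the `__getattr__` alias is an illustrative addition on top of the `__getitem__` shown above, so nested values come back re-wrapped and lookups can chain.

```python
# Minimal attrdict sketch; the __getattr__ alias is an assumption for illustration.
class attrdict(dict):
    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        return attrdict(value) if isinstance(value, dict) else value

    __getattr__ = __getitem__  # allow cfg.model as well as cfg["model"]

cfg = attrdict({"model": {"name": "demo", "params": {"temperature": 0.2}}})
print(cfg["model"]["params"]["temperature"])  # 0.2
print(cfg.model.name)                         # 'demo'
```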
__repr__ | return f"""Tokens Used: {self.total_tokens}
Prompt Tokens: {self.prompt_tokens}
Completion Tokens: {self.completion_tokens}
Successful Requests: {self.successful_requests}
Total Cost (USD): ${self.total_cost}""" | def __repr__(self) ->str:
return f"""Tokens Used: {self.total_tokens}
Prompt Tokens: {self.prompt_tokens}
Completion Tokens: {self.completion_tokens}
Successful Requests: {self.successful_requests}
Total Cost (USD): ${self.total_cost}""" | null |
__init__ | self.keys: Set[str] = set() | def __init__(self) ->None:
self.keys: Set[str] = set() | null |
test__convert_dict_to_message_other_role | message_dict = {'role': 'system', 'content': 'foo'}
result = _convert_dict_to_message(message_dict)
expected_output = ChatMessage(role='system', content='foo')
assert result == expected_output | def test__convert_dict_to_message_other_role() ->None:
message_dict = {'role': 'system', 'content': 'foo'}
result = _convert_dict_to_message(message_dict)
expected_output = ChatMessage(role='system', content='foo')
assert result == expected_output | null |
add_texts | """Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids for documents.
Ids will be autogenerated if not provided.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
docs = self._prep_docs(texts, metadatas, ids)
result = self.search_index.add_documents(docs)
return [r.id for r in result] | def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
None, ids: Optional[List[str]]=None, **kwargs: Any) ->List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids for documents.
Ids will be autogenerated if not provided.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
docs = self._prep_docs(texts, metadatas, ids)
result = self.search_index.add_documents(docs)
return [r.id for r in result] | Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids for documents.
Ids will be autogenerated if not provided.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore. |
get_num_tokens | """Get the number of tokens present in the text.
Useful for checking if an input will fit in a model's context window.
Args:
text: The string input to tokenize.
Returns:
The integer number of tokens in the text.
"""
try:
result = self.client_preview.count_tokens([text])
except AttributeError:
raise_vertex_import_error()
return result.total_tokens | def get_num_tokens(self, text: str) ->int:
"""Get the number of tokens present in the text.
Useful for checking if an input will fit in a model's context window.
Args:
text: The string input to tokenize.
Returns:
The integer number of tokens in the text.
"""
try:
result = self.client_preview.count_tokens([text])
except AttributeError:
raise_vertex_import_error()
return result.total_tokens | Get the number of tokens present in the text.
Useful for checking if an input will fit in a model's context window.
Args:
text: The string input to tokenize.
Returns:
The integer number of tokens in the text. |
output_keys | return [self.output_key] | @property
def output_keys(self) ->List[str]:
return [self.output_key] | null |
parse | if '```bash' in text:
return self.get_code_blocks(text)
else:
raise OutputParserException(f'Failed to parse bash output. Got: {text}') | def parse(self, text: str) ->List[str]:
if '```bash' in text:
return self.get_code_blocks(text)
else:
raise OutputParserException(f'Failed to parse bash output. Got: {text}'
) | null |
similarity_search_by_vector | """Perform a similarity search with Yellowbrick by vectors
Args:
embedding (List[float]): query embedding
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
NOTE: Please do not let end-user fill this and always be aware
of SQL injection.
Returns:
List[Document]: List of documents
"""
documents = self.similarity_search_with_score_by_vector(embedding=embedding,
k=k)
return [doc for doc, _ in documents] | def similarity_search_by_vector(self, embedding: List[float], k: int=4, **
kwargs: Any) ->List[Document]:
"""Perform a similarity search with Yellowbrick by vectors
Args:
embedding (List[float]): query embedding
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
NOTE: Please do not let end-user fill this and always be aware
of SQL injection.
Returns:
List[Document]: List of documents
"""
documents = self.similarity_search_with_score_by_vector(embedding=
embedding, k=k)
return [doc for doc, _ in documents] | Perform a similarity search with Yellowbrick by vectors
Args:
embedding (List[float]): query embedding
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
NOTE: Please do not let end-user fill this and always be aware
of SQL injection.
Returns:
List[Document]: List of documents |
_get_urls | if self.urls:
return self.urls
try:
import listparser
except ImportError as e:
raise ImportError(
"Package listparser must be installed if the opml arg is used. Please install with 'pip install listparser' or use the urls arg instead."
) from e
rss = listparser.parse_folder(self.opml)
return [feed.url for feed in rss.feeds] | @property
def _get_urls(self) ->Sequence[str]:
if self.urls:
return self.urls
try:
import listparser
except ImportError as e:
raise ImportError(
"Package listparser must be installed if the opml arg is used. Please install with 'pip install listparser' or use the urls arg instead."
) from e
rss = listparser.parse_folder(self.opml)
return [feed.url for feed in rss.feeds] | null |
test_yield_keys | store = UpstashRedisByteStore(client=redis_client, ttl=None)
redis_client.mset({'key1': 'value2', 'key2': 'value2'})
assert sorted(store.yield_keys()) == ['key1', 'key2']
assert sorted(store.yield_keys(prefix='key*')) == ['key1', 'key2']
assert sorted(store.yield_keys(prefix='lang*')) == [] | def test_yield_keys(redis_client: Redis) ->None:
store = UpstashRedisByteStore(client=redis_client, ttl=None)
redis_client.mset({'key1': 'value2', 'key2': 'value2'})
assert sorted(store.yield_keys()) == ['key1', 'key2']
assert sorted(store.yield_keys(prefix='key*')) == ['key1', 'key2']
assert sorted(store.yield_keys(prefix='lang*')) == [] | null |
__init__ | """Initialize the TelegramChatLoader.
Args:
path (Union[str, Path]): Path to the exported Telegram chat zip,
directory, json, or HTML file.
"""
self.path = path if isinstance(path, str) else str(path) | def __init__(self, path: Union[str, Path]):
"""Initialize the TelegramChatLoader.
Args:
path (Union[str, Path]): Path to the exported Telegram chat zip,
directory, json, or HTML file.
"""
self.path = path if isinstance(path, str) else str(path) | Initialize the TelegramChatLoader.
Args:
path (Union[str, Path]): Path to the exported Telegram chat zip,
directory, json, or HTML file. |
parse | try:
return self.enum(response.strip())
except ValueError:
raise OutputParserException(
f"Response '{response}' is not one of the expected values: {self._valid_values}"
) | def parse(self, response: str) ->Any:
try:
return self.enum(response.strip())
except ValueError:
raise OutputParserException(
f"Response '{response}' is not one of the expected values: {self._valid_values}"
) | null |
_load_run_evaluators | """
Load run evaluators from a configuration.
Args:
config: Configuration for the run evaluators.
Returns:
A list of run evaluators.
"""
run_evaluators = []
input_key, prediction_key, reference_key = None, None, None
if config.evaluators or any([isinstance(e, EvaluatorType) for e in
    config.evaluators]) or config.custom_evaluators and any([
    isinstance(e, StringEvaluator) for e in config.custom_evaluators]):
input_key, prediction_key, reference_key = _get_keys(config, run_inputs,
run_outputs, example_outputs)
for eval_config in config.evaluators:
run_evaluator = _construct_run_evaluator(eval_config, config.eval_llm,
run_type, data_type, example_outputs, reference_key, input_key,
prediction_key)
run_evaluators.append(run_evaluator)
custom_evaluators = config.custom_evaluators or []
for custom_evaluator in custom_evaluators:
if isinstance(custom_evaluator, RunEvaluator):
run_evaluators.append(custom_evaluator)
elif isinstance(custom_evaluator, StringEvaluator):
        run_evaluators.append(
            smith_eval.StringRunEvaluatorChain.from_run_and_data_type(
                custom_evaluator, run_type, data_type, input_key=input_key,
                prediction_key=prediction_key, reference_key=reference_key))
else:
raise ValueError(
f'Unsupported custom evaluator: {custom_evaluator}. Expected RunEvaluator or StringEvaluator.'
)
return run_evaluators | def _load_run_evaluators(config: smith_eval.RunEvalConfig, run_type: str,
data_type: DataType, example_outputs: Optional[List[str]], run_inputs:
Optional[List[str]], run_outputs: Optional[List[str]]) ->List[RunEvaluator
]:
"""
Load run evaluators from a configuration.
Args:
config: Configuration for the run evaluators.
Returns:
A list of run evaluators.
"""
run_evaluators = []
input_key, prediction_key, reference_key = None, None, None
    if config.evaluators or any([isinstance(e, EvaluatorType) for e in
        config.evaluators]) or config.custom_evaluators and any([
        isinstance(e, StringEvaluator) for e in config.custom_evaluators]):
input_key, prediction_key, reference_key = _get_keys(config,
run_inputs, run_outputs, example_outputs)
for eval_config in config.evaluators:
        run_evaluator = _construct_run_evaluator(eval_config,
            config.eval_llm, run_type, data_type, example_outputs,
            reference_key, input_key, prediction_key)
run_evaluators.append(run_evaluator)
custom_evaluators = config.custom_evaluators or []
for custom_evaluator in custom_evaluators:
if isinstance(custom_evaluator, RunEvaluator):
run_evaluators.append(custom_evaluator)
elif isinstance(custom_evaluator, StringEvaluator):
            run_evaluators.append(
                smith_eval.StringRunEvaluatorChain.from_run_and_data_type(
                    custom_evaluator, run_type, data_type,
                    input_key=input_key, prediction_key=prediction_key,
                    reference_key=reference_key))
else:
raise ValueError(
f'Unsupported custom evaluator: {custom_evaluator}. Expected RunEvaluator or StringEvaluator.'
)
return run_evaluators | Load run evaluators from a configuration.
Args:
config: Configuration for the run evaluators.
Returns:
A list of run evaluators. |
_wait_last_task | client = self.client()
tasks = client.get_tasks()
client.wait_for_task(tasks.results[0].uid) | def _wait_last_task(self) ->None:
client = self.client()
tasks = client.get_tasks()
client.wait_for_task(tasks.results[0].uid) | null |
dict | """Return dictionary representation of agent."""
_dict = super().dict()
del _dict['output_parser']
return _dict | def dict(self, **kwargs: Any) ->Dict:
"""Return dictionary representation of agent."""
_dict = super().dict()
del _dict['output_parser']
return _dict | Return dictionary representation of agent. |
_stream | message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, 'stream': True}
default_chunk_class = AIMessageChunk
generator_response = completion_with_retry(self, messages=message_dicts,
models_priority_list=self.models_priority_list, run_manager=run_manager,
**params)
for chunk in generator_response:
if chunk.event != 'update':
continue
    chunk, default_chunk_class = self._create_chat_generation_chunk(
        chunk.data, default_chunk_class)
yield chunk
if run_manager:
        run_manager.on_llm_new_token(token=chunk.message.content,
            chunk=chunk.message)
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, 'stream': True}
default_chunk_class = AIMessageChunk
generator_response = completion_with_retry(self, messages=message_dicts,
models_priority_list=self.models_priority_list, run_manager=
run_manager, **params)
for chunk in generator_response:
if chunk.event != 'update':
continue
        chunk, default_chunk_class = self._create_chat_generation_chunk(
            chunk.data, default_chunk_class)
yield chunk
if run_manager:
            run_manager.on_llm_new_token(token=chunk.message.content,
                chunk=chunk.message)
on_agent_finish | """Run when agent finish is received.
Args:
finish (AgentFinish): The agent finish.
Returns:
Any: The result of the callback.
"""
handle_event(self.handlers, 'on_agent_finish', 'ignore_agent', finish,
run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags,
**kwargs) | def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) ->Any:
"""Run when agent finish is received.
Args:
finish (AgentFinish): The agent finish.
Returns:
Any: The result of the callback.
"""
handle_event(self.handlers, 'on_agent_finish', 'ignore_agent', finish,
        run_id=self.run_id, parent_run_id=self.parent_run_id,
        tags=self.tags, **kwargs)
Args:
finish (AgentFinish): The agent finish.
Returns:
Any: The result of the callback. |
test_kotlin_code_splitter | splitter = RecursiveCharacterTextSplitter.from_language(Language.KOTLIN,
chunk_size=CHUNK_SIZE, chunk_overlap=0)
code = """
class HelloWorld {
companion object {
@JvmStatic
fun main(args: Array<String>) {
println("Hello, World!")
}
}
}
"""
chunks = splitter.split_text(code)
assert chunks == ['class', 'HelloWorld {', 'companion', 'object {',
'@JvmStatic', 'fun', 'main(args:', 'Array<String>)', '{',
'println("Hello,', 'World!")', '}\n }', '}'] | def test_kotlin_code_splitter() ->None:
splitter = RecursiveCharacterTextSplitter.from_language(Language.KOTLIN,
chunk_size=CHUNK_SIZE, chunk_overlap=0)
code = """
class HelloWorld {
companion object {
@JvmStatic
fun main(args: Array<String>) {
println("Hello, World!")
}
}
}
"""
chunks = splitter.split_text(code)
assert chunks == ['class', 'HelloWorld {', 'companion', 'object {',
'@JvmStatic', 'fun', 'main(args:', 'Array<String>)', '{',
'println("Hello,', 'World!")', '}\n }', '}'] | null |
construct_html_from_prompt_and_generation | """Construct an html element from a prompt and a generation.
Parameters:
prompt (str): The prompt.
generation (str): The generation.
Returns:
(wandb.Html): The html element."""
wandb = import_wandb()
formatted_prompt = prompt.replace('\n', '<br>')
formatted_generation = generation.replace('\n', '<br>')
return wandb.Html(
f"""
<p style="color:black;">{formatted_prompt}:</p>
<blockquote>
<p style="color:green;">
{formatted_generation}
</p>
</blockquote>
"""
, inject=False) | def construct_html_from_prompt_and_generation(prompt: str, generation: str
) ->Any:
"""Construct an html element from a prompt and a generation.
Parameters:
prompt (str): The prompt.
generation (str): The generation.
Returns:
(wandb.Html): The html element."""
wandb = import_wandb()
formatted_prompt = prompt.replace('\n', '<br>')
formatted_generation = generation.replace('\n', '<br>')
return wandb.Html(
f"""
<p style="color:black;">{formatted_prompt}:</p>
<blockquote>
<p style="color:green;">
{formatted_generation}
</p>
</blockquote>
"""
, inject=False) | Construct an html element from a prompt and a generation.
Parameters:
prompt (str): The prompt.
generation (str): The generation.
Returns:
(wandb.Html): The html element. |
test_sentence_transformers_count_tokens | splitter = SentenceTransformersTokenTextSplitter(model_name=
'sentence-transformers/paraphrase-albert-small-v2')
text = 'Lorem ipsum'
token_count = splitter.count_tokens(text=text)
expected_start_stop_token_count = 2
expected_text_token_count = 5
expected_token_count = (expected_start_stop_token_count +
expected_text_token_count)
assert expected_token_count == token_count | def test_sentence_transformers_count_tokens() ->None:
splitter = SentenceTransformersTokenTextSplitter(model_name=
'sentence-transformers/paraphrase-albert-small-v2')
text = 'Lorem ipsum'
token_count = splitter.count_tokens(text=text)
expected_start_stop_token_count = 2
expected_text_token_count = 5
expected_token_count = (expected_start_stop_token_count +
expected_text_token_count)
assert expected_token_count == token_count | null |
validate_client | """Validate and update client arguments, including API key and formatting"""
if not values.get('client'):
values['client'] = NVEModel(**values)
return values | @root_validator(pre=True)
def validate_client(cls, values: Any) ->Any:
"""Validate and update client arguments, including API key and formatting"""
if not values.get('client'):
values['client'] = NVEModel(**values)
return values | Validate and update client arguments, including API key and formatting |
parse | """Parse the output of an LLM call."""
return re.findall(self.pattern, text, re.MULTILINE) | def parse(self, text: str) ->List[str]:
"""Parse the output of an LLM call."""
return re.findall(self.pattern, text, re.MULTILINE) | Parse the output of an LLM call. |
completion_with_retry | """Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) ->Any:
stream = kwargs.pop('stream', False)
if stream:
return self.client.chat_stream(**kwargs)
else:
return self.client.chat(**kwargs)
return _completion_with_retry(**kwargs) | def completion_with_retry(self, run_manager: Optional[
CallbackManagerForLLMRun]=None, **kwargs: Any) ->Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) ->Any:
stream = kwargs.pop('stream', False)
if stream:
return self.client.chat_stream(**kwargs)
else:
return self.client.chat(**kwargs)
return _completion_with_retry(**kwargs) | Use tenacity to retry the completion call. |
ignore_llm | """Whether to ignore LLM callbacks."""
return self.ignore_llm_ | @property
def ignore_llm(self) ->bool:
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_ | Whether to ignore LLM callbacks. |
test_integration_initialization | """Test chat model initialization."""
ChatNVIDIA(model='llama2_13b', nvidia_api_key='nvapi-...', temperature=0.5,
top_p=0.9, max_tokens=50)
ChatNVIDIA(model='mistral', nvidia_api_key='nvapi-...') | def test_integration_initialization() ->None:
"""Test chat model initialization."""
ChatNVIDIA(model='llama2_13b', nvidia_api_key='nvapi-...', temperature=
0.5, top_p=0.9, max_tokens=50)
ChatNVIDIA(model='mistral', nvidia_api_key='nvapi-...') | Test chat model initialization. |
create_client | if values.get('client') is not None:
return values
try:
import boto3
from botocore.client import Config
from botocore.exceptions import UnknownServiceError
if values.get('credentials_profile_name'):
session = boto3.Session(profile_name=values['credentials_profile_name']
)
else:
session = boto3.Session()
client_params = {'config': Config(connect_timeout=120, read_timeout=120,
retries={'max_attempts': 0})}
if values.get('region_name'):
client_params['region_name'] = values['region_name']
if values.get('endpoint_url'):
client_params['endpoint_url'] = values['endpoint_url']
values['client'] = session.client('bedrock-agent-runtime', **client_params)
return values
except ImportError:
raise ModuleNotFoundError(
'Could not import boto3 python package. Please install it with `pip install boto3`.'
)
except UnknownServiceError as e:
raise ModuleNotFoundError(
'Ensure that you have installed the latest boto3 package that contains the API for `bedrock-runtime-agent`.'
) from e
except Exception as e:
raise ValueError(
'Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid.'
) from e | @root_validator(pre=True)
def create_client(cls, values: Dict[str, Any]) ->Dict[str, Any]:
if values.get('client') is not None:
return values
try:
import boto3
from botocore.client import Config
from botocore.exceptions import UnknownServiceError
if values.get('credentials_profile_name'):
session = boto3.Session(profile_name=values[
'credentials_profile_name'])
else:
session = boto3.Session()
        client_params = {'config': Config(connect_timeout=120,
            read_timeout=120, retries={'max_attempts': 0})}
if values.get('region_name'):
client_params['region_name'] = values['region_name']
if values.get('endpoint_url'):
client_params['endpoint_url'] = values['endpoint_url']
        values['client'] = session.client('bedrock-agent-runtime',
            **client_params)
return values
except ImportError:
raise ModuleNotFoundError(
'Could not import boto3 python package. Please install it with `pip install boto3`.'
)
except UnknownServiceError as e:
raise ModuleNotFoundError(
'Ensure that you have installed the latest boto3 package that contains the API for `bedrock-runtime-agent`.'
) from e
except Exception as e:
raise ValueError(
'Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid.'
) from e | null |
_import_spark_sql_tool_InfoSparkSQLTool | from langchain_community.tools.spark_sql.tool import InfoSparkSQLTool
return InfoSparkSQLTool | def _import_spark_sql_tool_InfoSparkSQLTool() ->Any:
from langchain_community.tools.spark_sql.tool import InfoSparkSQLTool
return InfoSparkSQLTool | null |
delete | """Delete vectors by ids or uuids.
Args:
ids: List of ids to delete.
"""
with Session(self._bind) as session:
if ids is not None:
self.logger.debug(
'Trying to delete vectors by ids (represented by the model using the custom ids field)'
)
        stmt = delete(self.EmbeddingStore).where(
            self.EmbeddingStore.custom_id.in_(ids))
session.execute(stmt)
session.commit() | def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->None:
"""Delete vectors by ids or uuids.
Args:
ids: List of ids to delete.
"""
with Session(self._bind) as session:
if ids is not None:
self.logger.debug(
'Trying to delete vectors by ids (represented by the model using the custom ids field)'
)
            stmt = delete(self.EmbeddingStore).where(
                self.EmbeddingStore.custom_id.in_(ids))
session.execute(stmt)
session.commit() | Delete vectors by ids or uuids.
Args:
ids: List of ids to delete. |
_Ellipsis | self.write('...') | def _Ellipsis(self, t):
self.write('...') | null |
_assert_with_parser | """Standard tests to verify that the given parser works.
Args:
parser (BaseBlobParser): The parser to test.
splits_by_page (bool): Whether the parser splits by page or not by default.
"""
blob = Blob.from_path(HELLO_PDF)
doc_generator = parser.lazy_parse(blob)
assert isinstance(doc_generator, Iterator)
docs = list(doc_generator)
assert len(docs) == 1
page_content = docs[0].page_content
assert isinstance(page_content, str)
assert docs[0].page_content.startswith('Hello world!')
blob = Blob.from_path(LAYOUT_PARSER_PAPER_PDF)
doc_generator = parser.lazy_parse(blob)
assert isinstance(doc_generator, Iterator)
docs = list(doc_generator)
if splits_by_page:
assert len(docs) == 16
else:
assert len(docs) == 1
assert 'LayoutParser' in docs[0].page_content
metadata = docs[0].metadata
assert metadata['source'] == str(LAYOUT_PARSER_PAPER_PDF)
if splits_by_page:
assert metadata['page'] == 0 | def _assert_with_parser(parser: BaseBlobParser, splits_by_page: bool=True
) ->None:
"""Standard tests to verify that the given parser works.
Args:
parser (BaseBlobParser): The parser to test.
splits_by_page (bool): Whether the parser splits by page or not by default.
"""
blob = Blob.from_path(HELLO_PDF)
doc_generator = parser.lazy_parse(blob)
assert isinstance(doc_generator, Iterator)
docs = list(doc_generator)
assert len(docs) == 1
page_content = docs[0].page_content
assert isinstance(page_content, str)
assert docs[0].page_content.startswith('Hello world!')
blob = Blob.from_path(LAYOUT_PARSER_PAPER_PDF)
doc_generator = parser.lazy_parse(blob)
assert isinstance(doc_generator, Iterator)
docs = list(doc_generator)
if splits_by_page:
assert len(docs) == 16
else:
assert len(docs) == 1
assert 'LayoutParser' in docs[0].page_content
metadata = docs[0].metadata
assert metadata['source'] == str(LAYOUT_PARSER_PAPER_PDF)
if splits_by_page:
assert metadata['page'] == 0 | Standard tests to verify that the given parser works.
Args:
parser (BaseBlobParser): The parser to test.
splits_by_page (bool): Whether the parser splits by page or not by default. |
test_azure_openai_embedding_documents_multiple | """Test openai embeddings."""
documents = ['foo bar', 'bar foo', 'foo']
embedding = _get_embeddings(chunk_size=2)
embedding.embedding_ctx_length = 8191
output = embedding.embed_documents(documents)
assert embedding.chunk_size == 2
assert len(output) == 3
assert len(output[0]) == 1536
assert len(output[1]) == 1536
assert len(output[2]) == 1536 | @pytest.mark.scheduled
def test_azure_openai_embedding_documents_multiple() ->None:
"""Test openai embeddings."""
documents = ['foo bar', 'bar foo', 'foo']
embedding = _get_embeddings(chunk_size=2)
embedding.embedding_ctx_length = 8191
output = embedding.embed_documents(documents)
assert embedding.chunk_size == 2
assert len(output) == 3
assert len(output[0]) == 1536
assert len(output[1]) == 1536
assert len(output[2]) == 1536 | Test openai embeddings. |
on_chain_start | """Do nothing when chain starts"""
pass | def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any],
**kwargs: Any) ->None:
"""Do nothing when chain starts"""
pass | Do nothing when chain starts |
load | self.__validate_instance()
presigned = self.__lakefs_client.is_presign_supported()
docs: List[Document] = []
objs = self.__lakefs_client.ls_objects(repo=self.repo, ref=self.ref, path=
self.path, presign=presigned)
for obj in objs:
lakefs_unstructured_loader = UnstructuredLakeFSLoader(obj[1], self.repo,
self.ref, obj[0], presigned)
docs.extend(lakefs_unstructured_loader.load())
return docs | def load(self) ->List[Document]:
self.__validate_instance()
presigned = self.__lakefs_client.is_presign_supported()
docs: List[Document] = []
objs = self.__lakefs_client.ls_objects(repo=self.repo, ref=self.ref,
path=self.path, presign=presigned)
for obj in objs:
        lakefs_unstructured_loader = UnstructuredLakeFSLoader(obj[1],
            self.repo, self.ref, obj[0], presigned)
docs.extend(lakefs_unstructured_loader.load())
return docs | null |
_call | """Call to Beam."""
url = 'https://apps.beam.cloud/' + self.app_id if self.app_id else self.url
payload = {'prompt': prompt, 'max_length': self.max_length}
payload.update(kwargs)
headers = {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate',
'Authorization': 'Basic ' + self.authorization, 'Connection':
'keep-alive', 'Content-Type': 'application/json'}
for _ in range(DEFAULT_NUM_TRIES):
request = requests.post(url, headers=headers, data=json.dumps(payload))
if request.status_code == 200:
return request.json()['text']
time.sleep(DEFAULT_SLEEP_TIME)
logger.warning('Unable to successfully call model.')
return '' | def _call(self, prompt: str, stop: Optional[list]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call to Beam."""
url = 'https://apps.beam.cloud/' + self.app_id if self.app_id else self.url
payload = {'prompt': prompt, 'max_length': self.max_length}
payload.update(kwargs)
headers = {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate',
'Authorization': 'Basic ' + self.authorization, 'Connection':
'keep-alive', 'Content-Type': 'application/json'}
for _ in range(DEFAULT_NUM_TRIES):
request = requests.post(url, headers=headers, data=json.dumps(payload))
if request.status_code == 200:
return request.json()['text']
time.sleep(DEFAULT_SLEEP_TIME)
logger.warning('Unable to successfully call model.')
return '' | Call to Beam. |
test_load_success_paper_name | """Test a query of paper name that returns one document"""
docs = api_client.load('Heat-bath random walks with Markov bases')
assert len(docs) == 3
assert_docs(docs) | def test_load_success_paper_name(api_client: ArxivAPIWrapper) ->None:
"""Test a query of paper name that returns one document"""
docs = api_client.load('Heat-bath random walks with Markov bases')
assert len(docs) == 3
assert_docs(docs) | Test a query of paper name that returns one document |
dialect | """Return string representation of SQL dialect to use."""
return self.db.dialect | @property
def dialect(self) ->str:
"""Return string representation of SQL dialect to use."""
return self.db.dialect | Return string representation of SQL dialect to use. |
get_config_list | """Get a list of configs from a single config or a list of configs.
It is useful for subclasses overriding batch() or abatch().
Args:
config (Optional[Union[RunnableConfig, List[RunnableConfig]]]):
The config or list of configs.
length (int): The length of the list.
Returns:
List[RunnableConfig]: The list of configs.
Raises:
ValueError: If the length of the list is not equal to the length of the inputs.
"""
if length < 0:
raise ValueError(f'length must be >= 0, but got {length}')
if isinstance(config, list) and len(config) != length:
raise ValueError(
f'config must be a list of the same length as inputs, but got {len(config)} configs for {length} inputs'
)
return list(map(ensure_config, config)) if isinstance(config, list) else [
ensure_config(config) for _ in range(length)] | def get_config_list(config: Optional[Union[RunnableConfig, List[
RunnableConfig]]], length: int) ->List[RunnableConfig]:
"""Get a list of configs from a single config or a list of configs.
It is useful for subclasses overriding batch() or abatch().
Args:
config (Optional[Union[RunnableConfig, List[RunnableConfig]]]):
The config or list of configs.
length (int): The length of the list.
Returns:
List[RunnableConfig]: The list of configs.
Raises:
ValueError: If the length of the list is not equal to the length of the inputs.
"""
if length < 0:
raise ValueError(f'length must be >= 0, but got {length}')
if isinstance(config, list) and len(config) != length:
raise ValueError(
f'config must be a list of the same length as inputs, but got {len(config)} configs for {length} inputs'
)
return list(map(ensure_config, config)) if isinstance(config, list) else [
ensure_config(config) for _ in range(length)] | Get a list of configs from a single config or a list of configs.
It is useful for subclasses overriding batch() or abatch().
Args:
config (Optional[Union[RunnableConfig, List[RunnableConfig]]]):
The config or list of configs.
length (int): The length of the list.
Returns:
List[RunnableConfig]: The list of configs.
Raises:
ValueError: If the length of the list is not equal to the length of the inputs. |
test_all_imports | assert set(__all__) == set(EXPECTED_ALL) | def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL) | null |