stmnk committed
Commit 38a02be · 1 Parent(s): 7a62924

add es method

Files changed (1):
  app.py  +84 -30
app.py CHANGED
@@ -15,46 +15,98 @@ def query(payload):
 
 
 function_code = r"""
-def fetch_archive_from_http(url: str, output_dir: str, proxies: Optional[dict] = None) -> bool:
-    path = Path(output_dir)
-    if not path.exists():
-        path.mkdir(parents=True)
-
-    is_not_empty = len(list(Path(path).rglob("*"))) > 0
-    if is_not_empty:
-        return False
-    else:
-        _, _, archive_extension = url.rpartition(".")
-        request_data = requests.get(url, proxies=proxies)
-
-        if archive_extension == "zip":
-            zip_archive = zipfile.ZipFile(io.BytesIO(request_data.content))
-            zip_archive.extractall(output_dir)
-        elif archive_extension in ["gz", "bz2", "xz"]:
-            tar_archive = tarfile.open(fileobj=io.BytesIO(request_data.content), mode="r|*")
-            tar_archive.extractall(output_dir)
-        else:
-            pass
-
-        return True
+def write_documents(self, documents: Union[List[dict], List[Document]], index: Optional[str] = None,
+                    batch_size: int = 10_000, duplicate_documents: Optional[str] = None):
+
+    if index and not self.client.indices.exists(index=index):
+        self._create_document_index(index)
+
+    if index is None:
+        index = self.index
+    duplicate_documents = duplicate_documents or self.duplicate_documents
+    assert duplicate_documents in self.duplicate_documents_options, \
+        f"duplicate_documents parameter must be {', '.join(self.duplicate_documents_options)}"
+
+    field_map = self._create_document_field_map()
+    document_objects = [Document.from_dict(d, field_map=field_map) if isinstance(d, dict) else d for d in documents]
+    document_objects = self._handle_duplicate_documents(documents=document_objects,
+                                                        index=index,
+                                                        duplicate_documents=duplicate_documents)
+    documents_to_index = []
+    for doc in document_objects:
+        _doc = {
+            "_op_type": "index" if duplicate_documents == 'overwrite' else "create",
+            "_index": index,
+            **doc.to_dict(field_map=self._create_document_field_map())
+        }  # type: Dict[str, Any]
+
+        # cast embedding type as ES cannot deal with np.array
+        if _doc[self.embedding_field] is not None:
+            if type(_doc[self.embedding_field]) == np.ndarray:
+                _doc[self.embedding_field] = _doc[self.embedding_field].tolist()
+
+        # rename id for elastic
+        _doc["_id"] = str(_doc.pop("id"))
+
+        # don't index query score and empty fields
+        _ = _doc.pop("score", None)
+        _doc = {k:v for k,v in _doc.items() if v is not None}
+
+        # In order to have a flat structure in elastic + similar behaviour to the other DocumentStores,
+        # we "unnest" all value within "meta"
+        if "meta" in _doc.keys():
+            for k, v in _doc["meta"].items():
+                _doc[k] = v
+            _doc.pop("meta")
+        documents_to_index.append(_doc)
+
+        # Pass batch_size number of documents to bulk
+        if len(documents_to_index) % batch_size == 0:
+            bulk(self.client, documents_to_index, request_timeout=300, refresh=self.refresh_type)
+            documents_to_index = []
+
+    if documents_to_index:
+        bulk(self.client, documents_to_index, request_timeout=300, refresh=self.refresh_type)
+
 """
 
-task_code = f'Summarize Python: {function_code}'
+task_code = f' Summarize Python: {function_code}'
 
 real_docstring = r"""
-Fetch an archive (zip or tar.gz) from a url via http and extract content to an output directory.
-
-:param url: http address
-:param output_dir: local path
-:param proxies: proxies details as required by requests library
-:return: if anything got fetched
+    Indexes documents for later queries in Elasticsearch.
+
+    Behaviour if a document with the same ID already exists in ElasticSearch:
+    a) (Default) Throw Elastic's standard error message for duplicate IDs.
+    b) If `self.update_existing_documents=True` for DocumentStore: Overwrite existing documents.
+    (This is only relevant if you pass your own ID when initializing a `Document`.
+    If don't set custom IDs for your Documents or just pass a list of dictionaries here,
+    they will automatically get UUIDs assigned. See the `Document` class for details)
+
+    :param documents: a list of Python dictionaries or a list of Haystack Document objects.
+                      For documents as dictionaries, the format is {"content": "<the-actual-text>"}.
+                      Optionally: Include meta data via {"content": "<the-actual-text>",
+                      "meta":{"name": "<some-document-name>, "author": "somebody", ...}}
+                      It can be used for filtering and is accessible in the responses of the Finder.
+                      Advanced: If you are using your own Elasticsearch mapping, the key names in the dictionary
+                      should be changed to what you have set for self.content_field and self.name_field.
+    :param index: Elasticsearch index where the documents should be indexed. If not supplied, self.index will be used.
+    :param batch_size: Number of documents that are passed to Elasticsearch's bulk function at a time.
+    :param duplicate_documents: Handle duplicates document based on parameter options.
+                                Parameter options : ( 'skip','overwrite','fail')
+                                skip: Ignore the duplicates documents
+                                overwrite: Update any existing documents with the same ID when adding documents.
+                                fail: an error is raised if the document ID of the document being added already
+                                exists.
+    :raises DuplicateDocumentError: Exception trigger on duplicate document
+    :return: None
 """
 
-def pygen_func(function_code):
+def docgen_func(function_code):
     req_data = {"inputs": task_code}
     output = query(req_data)
     return str(output)
 
+def pygen_func(nl_code_intent):
     # inputs = {'code_nl': code_nl}
     # payload = json.dumps(inputs)
     # prediction = req.request(CT5_METHOD, CT5_URL, data=payload)
@@ -62,7 +114,9 @@ def pygen_func(function_code):
     # answer = json.loads(prediction.content.decode("utf-8"))
     # return str(answer)
 
-iface = gr.Interface(pygen_func,
+iface = gr.Interface(
+    # pygen_func,
+    docgen_func,
     [
         # gr.inputs.Textbox(lines=7, label="Code Intent (NL)", default=task_code),
        gr.inputs.Textbox(lines=7, label="Task + Code (PL)", default=task_code),
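
For context, two illustrative sketches follow. First, a caller-side view of the method quoted in the new function_code string, which appears to be Haystack's ElasticsearchDocumentStore.write_documents. The import path and constructor arguments are assumptions (they differ across Haystack versions); the document dict format and the duplicate_documents options come from the docstring shown in the diff.

# Hypothetical caller-side sketch, not part of app.py.
# Assumed: import path and constructor arguments. Taken from the diff: the dict
# format ({"content": ..., "meta": {...}}) and the 'skip'/'overwrite'/'fail' options.
from haystack.document_stores import ElasticsearchDocumentStore  # assumed import path

store = ElasticsearchDocumentStore(host="localhost", port=9200, index="document")  # assumed args
docs = [
    {"content": "Berlin is the capital of Germany.",
     "meta": {"name": "cities.txt", "author": "somebody"}},
]
# duplicate_documents may be 'skip', 'overwrite' or 'fail', per the docstring above
store.write_documents(docs, index="document", batch_size=10_000, duplicate_documents="overwrite")

Second, a rough standalone sketch of how the pieces this commit touches fit together at runtime: task_code wraps the quoted snippet in a "Summarize Python:" prompt, the new docgen_func forwards it to the query(payload) helper defined above this hunk (implementation not shown here), and gr.Interface exposes it as a textbox demo. The Inference API URL, token handling, and output component are placeholders, not taken from the diff.

# Hypothetical end-to-end sketch; only task_code, docgen_func and the Textbox input
# mirror what this commit adds to app.py. MODEL_URL, HF_TOKEN and the "text" output
# component are assumptions.
import requests
import gradio as gr

MODEL_URL = "https://api-inference.huggingface.co/models/<code-summarization-model>"  # placeholder
HF_TOKEN = "<hf-api-token>"  # placeholder

def query(payload):
    # app.py defines query(payload) above this hunk; assumed to POST to the HF Inference API
    response = requests.post(MODEL_URL, headers={"Authorization": f"Bearer {HF_TOKEN}"}, json=payload)
    return response.json()

function_code = "def add(a, b):\n    return a + b"   # any Python snippet to summarize
task_code = f' Summarize Python: {function_code}'    # same prompt format as the commit

def docgen_func(function_code):
    # same shape as the new docgen_func: send the prompt, return the raw model output
    return str(query({"inputs": task_code}))

iface = gr.Interface(docgen_func,
                     gr.inputs.Textbox(lines=7, label="Task + Code (PL)", default=task_code),
                     "text")  # output component assumed; the diff cuts off before it

if __name__ == "__main__":
    iface.launch()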