duchaba committed on
Commit
831e490
•
1 Parent(s): 0d7f4c2

Upload app.py with huggingface_hub

Files changed (1)
  1. app.py +836 -217
app.py CHANGED
@@ -1,14 +1,9 @@
1
- # %%writefile app.py
2
-
3
  ## required lib, required "pip install"
4
- # import transformers
5
- # import accelerate
6
- import openai
7
  import torch
8
  import cryptography
9
  import cryptography.fernet
10
- ## interface libs, required "pip install"
11
- import gradio
12
  import huggingface_hub
13
  import huggingface_hub.hf_api
14
  ## standard libs, no need to install
@@ -22,14 +17,37 @@ import sys
22
  import psutil
23
  import threading
24
  import socket
25
- # import PIL
26
- # import pandas
27
  import matplotlib
28
- class HFace_Pluto(object):
29
- #
30
  # initialize the object
31
  def __init__(self, name="Pluto",*args, **kwargs):
32
- super(HFace_Pluto, self).__init__(*args, **kwargs)
33
  self.author = "Duc Haba"
34
  self.name = name
35
  self._ph()
@@ -39,38 +57,38 @@ class HFace_Pluto(object):
39
  self._ph()
40
  #
41
  # define class var for stable diffusion
42
- self._device = 'cuda'
43
- self._steps = [3,8,21,55,89,144]
44
- self._guidances = [1.1,3.0,5.0,8.0,13.0,21.0]
45
- self._xkeyfile = '.xoxo'
46
- self._models = []
47
- self._seed = 667 # sum of walnut in ascii (or Angle 667)
48
- self._width = 512
49
- self._height = 512
50
- self._step = 50
51
- self._guidances = 7.5
52
- #self._generator = torch.Generator(device='cuda')
53
- self.pipes = []
54
- self.prompts = []
55
- self.images = []
56
- self.seeds = []
57
  self.fname_id = 0
58
  self.dname_img = "img_colab/"
59
- self._huggingface_key=b'gAAAAABld_3fKLl7aPBJzfAq-th37t95pMu2bVbH9QccOSecaUnm33XrpKpCXP4GL6Wr23g3vtrKWli5JK1ZPh18ilnDb_Su6GoVvU92Vzba64k3gBQwKF_g5DoH2vWq2XM8vx_5mKJh'
60
- self._kaggle_key=b'gAAAAABld_4_B6rrRhFYyfl77dacu1RhR4ktaLU6heYhQBSIj4ELBm7y4DzU1R8-H4yPKd0w08s11wkFJ9AR7XyESxM1SsrMBzqQEeW9JKNbl6jAaonFGmqbhFblkQqH4XjsapZru0qX'
61
- self._fkey="fes_f8Im569hYnI1Tn6FqP-6hS4rdmNOJ6DWcRPOsvc="
62
- self._color_primary = '#2780e3' #blue
63
- self._color_secondary = '#373a3c' #dark gray
64
- self._color_success = '#3fb618' #green
65
- self._color_info = '#9954bb' #purple
66
- self._color_warning = '#ff7518' #orange
67
- self._color_danger = '#ff0039' #red
68
- self._color_mid_gray = '#495057'
69
- self._ok=b'gAAAAABld_-y70otUll4Jwq3jEBXiw1tooSFo_gStRbkCyuu9_Dmdehc4M8lI_hFbum9CwyZuj9ZnXgxFIROebcPSF5qoA197VRvzUDQOMxY5zmHnImVROrsXVdZqXyIeYH_Q6cvXvFTX3rLBIKKWgvJmnpYGRaV6Q=='
 
70
  return
71
  #
72
  # pretty print output name-value line
73
  def _pp(self, a, b,is_print=True):
74
  # print("%34s : %s" % (str(a), str(b)))
75
  x = f'{"%34s" % str(a)} : {str(b)}'
76
  y = None
@@ -82,6 +100,16 @@ class HFace_Pluto(object):
82
  #
83
  # pretty print the header or footer lines
84
  def _ph(self,is_print=True):
85
  x = f'{"-"*34} : {"-"*34}'
86
  y = None
87
  if (is_print):
@@ -95,23 +123,55 @@ class HFace_Pluto(object):
95
  hf_names,
96
  hf_space="duchaba/monty",
97
  local_dir="/content/"):
98
- f = str(hf_names) + " is not iteratable, type: " + str(type(hf_names))
99
  try:
100
  for f in hf_names:
101
  lo = local_dir + f
102
- huggingface_hub.hf_hub_download(repo_id=hf_space, filename=f,
103
- use_auth_token=True,repo_type=huggingface_hub.REPO_TYPE_SPACE,
104
  force_filename=lo)
105
  except:
106
  self._pp("*Error", f)
107
- return
108
- #
109
  #
110
  def push_hface_files(self,
111
  hf_names,
112
  hf_space="duchaba/skin_cancer_diagnose",
113
  local_dir="/content/"):
114
- f = str(hf_names) + " is not iteratable, type: " + str(type(hf_names))
115
  try:
116
  for f in hf_names:
117
  lo = local_dir + f
@@ -122,10 +182,96 @@ class HFace_Pluto(object):
122
  repo_type=huggingface_hub.REPO_TYPE_SPACE)
123
  except Exception as e:
124
  self._pp("*Error", e)
125
  return
126
  #
127
  # Define a function to display available CPU and RAM
128
- def fetch_system_info(self):
129
  s=''
130
  # Get CPU usage as a percentage
131
  cpu_usage = psutil.cpu_percent()
@@ -135,134 +281,515 @@ class HFace_Pluto(object):
135
  mem_total_gb = mem.total / (1024 ** 3)
136
  mem_available_gb = mem.available / (1024 ** 3)
137
  mem_used_gb = mem.used / (1024 ** 3)
138
- # Print the results
139
- s += f"CPU usage: {cpu_usage}%\n"
140
  s += f"Total memory: {mem_total_gb:.2f} GB\n"
141
  s += f"Available memory: {mem_available_gb:.2f} GB\n"
142
  # print(f"Used memory: {mem_used_gb:.2f} GB")
143
  s += f"Memory usage: {mem_used_gb/mem_total_gb:.2f}%\n"
144
  return s
145
  #
146
- def restart_script_periodically(self):
147
- while True:
148
- #random_time = random.randint(540, 600)
149
- random_time = random.randint(15800, 21600)
150
- time.sleep(random_time)
151
- os.execl(sys.executable, sys.executable, *sys.argv)
152
- return
153
- #
154
- def write_file(self,fname, txt):
155
- f = open(fname, "w")
156
- f.writelines("\n".join(txt))
157
- f.close()
158
- return
159
- #
160
- def fetch_gpu_info(self):
161
  s=''
162
  try:
163
- s += f'Your GPU is the {torch.cuda.get_device_name(0)}\n'
164
- s += f'GPU ready staus {torch.cuda.is_available()}\n'
165
- s += f'GPU allocated RAM: {round(torch.cuda.memory_allocated(0)/1024**3,1)} GB\n'
166
- s += f'GPU reserved RAM {round(torch.cuda.memory_reserved(0)/1024**3,1)} GB\n'
167
  except Exception as e:
168
  s += f'**Warning, No GPU: {e}'
169
  return s
170
  #
171
- def _fetch_crypt(self,is_generate=False):
172
- s=self._fkey
173
- if (is_generate):
174
  s=open(self._xkeyfile, "rb").read()
175
  return s
176
  #
177
- def _gen_key(self):
178
  key = cryptography.fernet.Fernet.generate_key()
179
  with open(self._xkeyfile, "wb") as key_file:
180
- key_file.write(key)
181
- return
182
  #
183
- def _decrypt_it(self, x):
184
  y = self._fetch_crypt()
185
  f = cryptography.fernet.Fernet(y)
186
  m = f.decrypt(x)
187
  return m.decode()
188
  #
189
- def _encrypt_it(self, x):
190
  key = self._fetch_crypt()
191
  p = x.encode()
192
  f = cryptography.fernet.Fernet(key)
193
  y = f.encrypt(p)
194
  return y
195
  #
196
- def _login_hface(self):
197
- huggingface_hub.login(self._decrypt_it(self._huggingface_key),
198
- add_to_git_credential=True) # non-blocking login
199
  self._ph()
200
  return
201
  #
202
- def _fetch_version(self):
203
- s = ''
204
- # print(f"{'torch: 2.0.1':<25} Actual: {torch.__version__}")
205
- # print(f"{'transformers: 4.29.2':<25} Actual: {transformers.__version__}")
206
- s += f"{'openai: 0.27.7,':<28} Actual: {openai.__version__}\n"
207
- s += f"{'huggingface_hub: 0.14.1,':<28} Actual: {huggingface_hub.__version__}\n"
208
- s += f"{'gradio: 3.32.0,':<28} Actual: {gradio.__version__}\n"
209
- s += f"{'cryptography: 40.0.2,':<28} cryptography: {gradio.__version__}\n"
210
 
211
- return s
212
  #
213
- def _fetch_host_ip(self):
214
- s=''
215
- hostname = socket.gethostname()
216
- ip_address = socket.gethostbyname(hostname)
217
- s += f"Hostname: {hostname}\n"
218
- s += f"IP Address: {ip_address}\n"
219
- return s
220
  #
221
- def fetch_code_cells_from_notebook(self, notebook_name, filter_magic="# %%write",
222
- write_to_file=True, fname_override=None):
223
  """
224
- Reads a Jupyter notebook (.ipynb file) and writes out all the code cells
225
- that start with the specified magic command to a .py file.
226
 
227
  Parameters:
228
- - notebook_name (str): Name of the notebook file (with .ipynb extension).
229
- - filter_magic (str): Magic command filter. Only cells starting with this command will be written.
230
- The defualt is: "# %%write"
231
- - write_to_file (bool): If True, writes the filtered cells to a .py file.
232
- Otherwise, prints them to the standard output. The default is True.
233
- - fname_override (str): If provided, overrides the output filename. The default is None.
234
 
235
  Returns:
236
- - None: Writes the filtered code cells to a .py file or prints them based on the parameters.
237
-
238
- """
239
- with open(notebook_name, 'r', encoding='utf-8') as f:
240
- notebook_content = json.load(f)
241
-
242
- output_content = []
243
-
244
- # Loop through all the cells in the notebook
245
- for cell in notebook_content['cells']:
246
- # Check if the cell type is 'code' and starts with the specified magic command
247
- if cell['cell_type'] == 'code' and cell['source'] and cell['source'][0].startswith(filter_magic):
248
- # Append the source code of the cell to output_content
249
- output_content.append(''.join(cell['source']))
250
-
251
- if write_to_file:
252
- if fname_override is None:
253
- # Derive the output filename by replacing .ipynb with .py
254
- output_filename = notebook_name.replace(".ipynb", ".py")
255
- else:
256
- output_filename = fname_override
257
- with open(output_filename, 'w', encoding='utf-8') as f:
258
- f.write('\n'.join(output_content))
259
- print(f'File: {output_filename} written to disk.')
260
- else:
261
- # Print the code cells to the standard output
262
- print('\n'.join(output_content))
263
- print('-' * 40) # print separator
264
  return
265
  #
266
  # add module/method
267
  #
268
  import functools
@@ -275,57 +802,48 @@ def add_method(cls):
275
  return func # returning func means func can still be used normally
276
  return decorator
277
  #
278
- monty = HFace_Pluto("Monty, The lord of the magpies.")
279
- monty._login_hface()
280
- print(monty._fetch_version())
281
- monty._ph()
282
- print(monty.fetch_system_info())
283
- monty._ph()
284
- print(monty.fetch_gpu_info())
285
- monty._ph()
286
- print(monty._fetch_host_ip())
287
- monty._ph()
288
  # %%write -a app.py
289
 
290
  # client.moderations.create()
291
- # ai_client = openai.OpenAI(api_key=monty._decrypt_it(monty._ok))
292
- # %%writefile -a app.py
293
 
294
- #@add_method(HFace_Pluto)
295
- # # for OpenAI less version 0.27.7
296
- # def _censor_me(self, p, safer=0.0005):
297
- # #openai.Moderation.create()
298
- # omod = openai.Moderation.create(p)
299
- # r = omod.results[0].category_scores
300
- # jmod = json.loads(str(r))
301
- # #
302
- # max_key = max(jmod, key=jmod.get)
303
- # max_value = jmod[max_key]
304
- # sum_value = sum(jmod.values())
305
- # #
306
- # jmod["is_safer_flagged"] = False
307
- # if (max_value >= safer):
308
- # jmod["is_safer_flagged"] = True
309
- # jmod["is_flagged"] = omod.results[0].flagged
310
- # jmod['max_key'] = max_key
311
- # jmod['max_value'] = max_value
312
- # jmod['sum_value'] = sum_value
313
- # jmod['safer_value'] = safer
314
- # jmod['message'] = p
315
- # return jmod
316
- #
317
- # openai.api_key = monty._decrypt_it(monty._gpt_key)
318
  #
319
  # # for openai version 1.3.8
320
- @add_method(HFace_Pluto)
321
- # for OpenAI less version 0.27.7
322
  def _fetch_moderate_engine(self):
323
- self.ai_client = openai.OpenAI(api_key=self._decrypt_it(self._ok))
324
  self.text_model = "text-moderation-latest"
325
  return
326
  #
327
- @add_method(HFace_Pluto)
328
- # for OpenAI less version 0.27.7
329
  def _censor_me(self, p, safer=0.0005):
330
  self._fetch_moderate_engine()
331
  resp_orig = self.ai_client.moderations.create(input=p, model=self.text_model)
@@ -347,29 +865,29 @@ def _censor_me(self, p, safer=0.0005):
347
  v1['message'] = p
348
  return v1
349
  #
350
- @add_method(HFace_Pluto)
351
  def _draw_censor(self,data):
352
  self._color_mid_gray = '#6c757d'
353
  exp = (0.01, 0.01)
354
- x = [data['max_value'], (data['sum_value']-data['max_value'])]
355
- title='\nMessage Is Flagged As Unsafe\n'
356
- lab = [data['max_key'], 'Other 18 categories']
357
  if (data['is_flagged']):
358
- col=[self._color_danger, self._color_mid_gray]
359
  elif (data['is_safer_flagged']):
360
- col=[self._color_warning, self._color_mid_gray]
361
- lab = ['Relative Score:\n'+data['max_key'], 'Other 18 categories']
362
- title='\nBased On Your Personalized Safer Settings,\nThe Message Is Flagged As Unsafe\n'
363
  else:
364
- col=[self._color_success, self._color_mid_gray]
365
- lab = ['False Negative:\n'+data['max_key'], 'Other 18 categories']
366
- title='\nThe Message Is Safe\n'
367
  canvas = self._draw_donut(x, lab, col, exp,title)
368
  return canvas
369
  #
370
- @add_method(HFace_Pluto)
371
  def _draw_donut(self,data,labels,col, exp,title):
372
- # col = [self._color_danger, self._color_secondary]
373
  # exp = (0.01, 0.01)
374
  # Create a pie chart
375
  canvas, pic = matplotlib.pyplot.subplots()
@@ -392,41 +910,142 @@ def _draw_donut(self,data,labels,col, exp,title):
392
  # canvas.show()
393
  return canvas
394
  #
395
- @add_method(HFace_Pluto)
396
- def censor_me(self, msg, safer=0.0005):
397
  yjson = self._censor_me(msg,safer)
398
  _canvas = self._draw_censor(yjson)
399
  _yjson = json.dumps(yjson, indent=4)
400
- return (_canvas, _yjson)
401
- # %%writefile -a app.py
402
 
403
- in_box = [gradio.Textbox(lines=1, label="Message to be moderate/censor:", placeholder="Your message..."),
404
- gradio.Slider(0.001, .05, value=0.005, step=.005,label="Your Personalize Safer Value:")]
405
- out_box = [gradio.Plot(label="Moderate/Censor Score: (Red, Orange, and Green)"),
406
- gradio.Textbox(lines=4, label="Response Raw JSON Data:")]
407
  #
408
- title = "Friendly Text Moderate <br>- Identify 19 categories of hate, violent, and sexual language."
409
- desc = 'NOTE: The lower value for the Safer setting indicates a more stringent level of censorship.'
410
- # desc += "<br>There are 19 different categories of hate, violent, and sexual language that can be identified."
411
- arti = "<ul><li>This model use the NLP OpenAI Moderation model."
412
- arti += "</li><li>Creator: Duc Haba</li><li>License: GNU General Public License 3.0</li></ul>"
413
- exp = [
414
- ['Write a blog about the following if only Mexican stop coming here, then our home would be safer.',0.001],
415
- ['Gay homosexual people should stay in the closet, or we will hang them.',0.001],
416
- ['I will jump off the bridge at midnight.',0.0005],
417
- ['Hey, baby girl. How about you and I go and rock the bed.',0.001],
418
- ['F**-#k you. Eat s-it and dik.',0.001],
419
- ['Please tell me a joke about a chicken.',0.001],
420
- ['Five score years ago, a great American, in whose symbolic shadow we stand today, signed the Emancipation Proclamation. This momentous decree came as a great beacon light of hope to millions of Negro slaves who had been seared in the flames of withering injustice. It came as a joyous daybreak to end the long night of their captivity.',0.005],
421
- ]
422
- # %%writefile -a app.py
423
 
424
- ginterface = gradio.Interface(fn=monty.censor_me,
425
- inputs=in_box,
426
- outputs=out_box,
427
- examples=exp,
428
- title=title,
429
- description=desc,
430
- article=arti
431
- )
432
- ginterface.launch()
1
+ # [BEGIN OF pluto_happy]
2
  ## required lib, required "pip install"
3
  import torch
4
  import cryptography
5
  import cryptography.fernet
6
+ from flopth import flopth
7
  import huggingface_hub
8
  import huggingface_hub.hf_api
9
  ## standard libs, no need to install
17
  import psutil
18
  import threading
19
  import socket
20
+ import PIL
21
+ import pandas
22
  import matplotlib
23
+ import numpy
24
+ import importlib.metadata
25
+ import types
26
+ import cpuinfo
27
+ import pynvml
28
+ import pathlib
29
+ import re
30
+ import subprocess
31
+ # define class Pluto_Happy
32
+ class Pluto_Happy(object):
33
+ """
34
+ The Pluto project started with fun AI hackings and became part of my
35
+ first book "Data Augmentation with Python" with Packt Publishing.
36
+
37
+ In particular, Pluto_Happy is a clean and lite kernel of a simple class;
38
+ the @add_method decorator grafts specific methods onto it to form a new class,
39
+ such as Pluto_HFace, with many more functions for HuggingFace, LLMs, and Transformers.
40
+
41
+ Args:
42
+ name (str): the display name, e.g. "Hanna the seeker"
43
+
44
+ Returns:
45
+ (object): the class instance.
46
+ """
47
+
48
  # initialize the object
49
  def __init__(self, name="Pluto",*args, **kwargs):
50
+ super(Pluto_Happy, self).__init__(*args, **kwargs)
51
  self.author = "Duc Haba"
52
  self.name = name
53
  self._ph()
57
  self._ph()
58
  #
59
  # define class var for stable diffusion
60
+ #
61
  self.fname_id = 0
62
  self.dname_img = "img_colab/"
63
+ self.flops_per_sec_gcolab_cpu = 4887694725 # 925,554,209 | 9,276,182,810 | 1,722,089,747 | 5,287,694,725
64
+ self.flops_per_sec_gcolab_gpu = 6365360673 # 1,021,721,764 | 9,748,048,188 | 2,245,406,502 | 6,965,360,673
65
+ self.fname_requirements = './pluto_happy/requirements.txt'
66
+ #
67
+ self.color_primary = '#2780e3' #blue
68
+ self.color_secondary = '#373a3c' #dark gray
69
+ self.color_success = '#3fb618' #green
70
+ self.color_info = '#9954bb' #purple
71
+ self.color_warning = '#ff7518' #orange
72
+ self.color_danger = '#ff0039' #red
73
+ self.color_mid_gray = '#495057'
74
+ self._xkeyfile = '.xoxo'
75
  return
76
  #
77
  # pretty print output name-value line
78
  def _pp(self, a, b,is_print=True):
79
+
80
+ """
81
+ Pretty print output name-value line
82
+
83
+ Args:
84
+ a (str) : the name/label, printed right-aligned.
85
+ b (str) : the value, printed after the colon.
86
+ is_print (bool): print the line to console if True, otherwise return it as a str.
87
+
88
+ Returns:
89
+ y : None or output as (str)
90
+
91
+ """
92
  # print("%34s : %s" % (str(a), str(b)))
93
  x = f'{"%34s" % str(a)} : {str(b)}'
94
  y = None
100
  #
101
  # pretty print the header or footer lines
102
  def _ph(self,is_print=True):
103
+ """
104
+ Pretty prints the header or footer lines.
105
+
106
+ Args:
107
+ is_print (bool): whether to print the header or footer lines to console or return a str.
108
+
109
+ Return:
110
+ y : None or output as (str)
111
+
112
+ """
113
  x = f'{"-"*34} : {"-"*34}'
114
  y = None
115
  if (is_print):
123
  hf_names,
124
  hf_space="duchaba/monty",
125
  local_dir="/content/"):
126
+ """
127
+ Given a list of huggingface file names, download them from the provided huggingface space.
128
+
129
+ Args:
130
+ hf_names: (list) list of huggingface file names to download
131
+ hf_space: (str) huggingface space to download from.
132
+ local_dir: (str) local directory to store the files.
133
+
134
+ Returns:
135
+ status: (bool) True if download was successful, False otherwise.
136
+ """
137
+ status = True
138
+ # f = str(hf_names) + " is not iteratable, type: " + str(type(hf_names))
139
  try:
140
  for f in hf_names:
141
  lo = local_dir + f
142
+ huggingface_hub.hf_hub_download(repo_id=hf_space,
143
+ filename=f,
144
+ use_auth_token=True,
145
+ repo_type=huggingface_hub.REPO_TYPE_SPACE,
146
  force_filename=lo)
147
  except:
148
  self._pp("*Error", f)
149
+ status = False
150
+ return status
151
  #
152
+ # push files to huggingface
153
  def push_hface_files(self,
154
  hf_names,
155
  hf_space="duchaba/skin_cancer_diagnose",
156
  local_dir="/content/"):
157
+ # push files to huggingface space
158
+
159
+ """
160
+ Pushes files to huggingface space.
161
+
162
+ The function takes a list of file names as a
163
+ parameter and pushes them to the provided huggingface space.
164
+
165
+ Args:
166
+ hf_names: list(of strings), list of file names to be pushed.
167
+ hf_space: (str), the huggingface space to push to.
168
+ local_dir: (str), the local directory where the files
169
+ are stored.
170
+
171
+ Returns:
172
+ status: (bool) True if successfully pushed else False.
173
+ """
174
+ status = True
175
  try:
176
  for f in hf_names:
177
  lo = local_dir + f
182
  repo_type=huggingface_hub.REPO_TYPE_SPACE)
183
  except Exception as e:
184
  self._pp("*Error", e)
185
+ status = False
186
+ return status
187
+ #
188
+ # push the folder to huggingface space
189
+ def push_hface_folder(self,
190
+ hf_folder,
191
+ hf_space_id,
192
+ hf_dest_folder=None):
193
+
194
+ """
195
+
196
+ This function pushes the folder to huggingface space.
197
+
198
+ Args:
199
+ hf_folder: (str). The path to the folder to push.
200
+ hf_space_id: (str). The space id to push the folder to.
201
+ hf_dest_folder: (str). The destination folder in the space. If not specified,
202
+ the folder name will be used as the destination folder.
203
+
204
+ Returns:
205
+ status: (bool) True if the folder is pushed successfully, otherwise False.
206
+ """
207
+
208
+ status = True
209
+ try:
210
+ api = huggingface_hub.HfApi()
211
+ api.upload_folder(folder_path=hf_folder,
212
+ repo_id=hf_space_id,
213
+ path_in_repo=hf_dest_folder,
214
+ repo_type="space")
215
+ except Exception as e:
216
+ self._pp("*Error: ",e)
217
+ status = False
218
+ return status
219
+ #
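For reference, a hypothetical call to push_hface_folder (the space id and folder names below are illustrative, not taken from this repo's runtime):
    # monty.push_hface_folder("img_colab/", "duchaba/monty", hf_dest_folder="img_colab")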
220
+ # automatically restart huggingface space
221
+ def restart_hface_periodically(self):
222
+
223
+ """
224
+ This function restarts the huggingface space automatically at random
225
+ intervals.
226
+
227
+ Args:
228
+ None
229
+
230
+ Returns:
231
+ None
232
+ """
233
+
234
+ while True:
235
+ random_time = random.randint(15800, 21600)
236
+ time.sleep(random_time)
237
+ os.execl(sys.executable, sys.executable, *sys.argv)
238
+ return
239
+ #
240
+ # log into huggingface
241
+ def login_hface(self, key=None):
242
+
243
+ """
244
+ Log into HuggingFace.
245
+
246
+ Args:
247
+ key: (str, optional) If key is set, this key will be used to log in,
248
+ otherwise the key will be decrypted from the key file.
249
+
250
+ Returns:
251
+ None
252
+ """
253
+
254
+ if (key is None):
255
+ x = self.decrypt_it(self._huggingface_key)
256
+ else:
257
+ x = key
258
+ huggingface_hub.login(x, add_to_git_credential=True) # non-blocking login
259
+ self._ph()
260
  return
261
  #
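For reference, a minimal login sketch matching the key setup done at the bottom of this file (the key values below are placeholders, not real credentials):
    # monty = Pluto_Happy("Monty")
    # monty._fkey = "<fernet_key>"[::-1]              # class convention: _fkey is stored reversed
    # monty._huggingface_key = b"<token encrypted with encrypt_it()>"
    # monty.login_hface()                             # decrypts the token and logs in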
262
  # Define a function to display available CPU and RAM
263
+ def fetch_info_system(self):
264
+
265
+ """
266
+ Fetches system information, such as CPU usage and memory usage.
267
+
268
+ Args:
269
+ None.
270
+
271
+ Returns:
272
+ s: (str) A string containing the system information.
273
+ """
274
+
275
  s=''
276
  # Get CPU usage as a percentage
277
  cpu_usage = psutil.cpu_percent()
281
  mem_total_gb = mem.total / (1024 ** 3)
282
  mem_available_gb = mem.available / (1024 ** 3)
283
  mem_used_gb = mem.used / (1024 ** 3)
284
+ # save the results
285
  s += f"Total memory: {mem_total_gb:.2f} GB\n"
286
  s += f"Available memory: {mem_available_gb:.2f} GB\n"
287
  # print(f"Used memory: {mem_used_gb:.2f} GB")
288
  s += f"Memory usage: {mem_used_gb/mem_total_gb:.2f}%\n"
289
+ try:
290
+ cpu_info = cpuinfo.get_cpu_info()
291
+ s += f'CPU type: {cpu_info["brand_raw"]}, arch: {cpu_info["arch"]}\n'
292
+ s += f'Number of CPU cores: {cpu_info["count"]}\n'
293
+ s += f"CPU usage: {cpu_usage}%\n"
294
+ s += f'Python version: {cpu_info["python_version"]}'
295
+ except Exception as e:
296
+ s += f'CPU type: Not accessible, Error: {e}'
297
  return s
298
  #
299
+ # fetch GPU RAM info
300
+ def fetch_info_gpu(self):
301
+
302
+ """
303
+ Function to fetch GPU RAM info
304
+
305
+ Args:
306
+ None.
307
+
308
+ Returns:
309
+ s: (str) GPU RAM info in human readable format.
310
+ """
311
+
312
  s=''
313
+ mtotal = 0
314
+ mfree = 0
315
  try:
316
+ nvml_handle = pynvml.nvmlInit()
317
+ devices = pynvml.nvmlDeviceGetCount()
318
+ for i in range(devices):
319
+ device = pynvml.nvmlDeviceGetHandleByIndex(i)
320
+ memory_info = pynvml.nvmlDeviceGetMemoryInfo(device)
321
+ mtotal += memory_info.total
322
+ mfree += memory_info.free
323
+ mtotal = mtotal / 1024**3
324
+ mfree = mfree / 1024**3
325
+ # print(f"GPU {i}: Total Memory: {memory_info.total/1024**3} GB, Free Memory: {memory_info.free/1024**3} GB")
326
+ s += f'GPU type: {torch.cuda.get_device_name(0)}\n'
327
+ s += f'GPU ready status: {torch.cuda.is_available()}\n'
328
+ s += f'Number of GPUs: {devices}\n'
329
+ s += f'Total Memory: {mtotal:.2f} GB\n'
330
+ s += f'Free Memory: {mfree:.2f} GB\n'
331
+ s += f'GPU allocated RAM: {round(torch.cuda.memory_allocated(0)/1024**3,2)} GB\n'
332
+ s += f'GPU reserved RAM: {round(torch.cuda.memory_reserved(0)/1024**3,2)} GB\n'
333
  except Exception as e:
334
  s += f'**Warning, No GPU: {e}'
335
  return s
336
  #
337
+ # fetch info about host ip
338
+ def fetch_info_host_ip(self):
339
+ """
340
+ Function to fetch current host name and ip address
341
+
342
+ Args:
343
+ None.
344
+
345
+ Returns:
346
+ s: (str) host name and ip info in human readable format.
347
+ """
348
+ s=''
349
+ try:
350
+ hostname = socket.gethostname()
351
+ ip_address = socket.gethostbyname(hostname)
352
+ s += f"Hostname: {hostname}\n"
353
+ s += f"IP Address: {ip_address}\n"
354
+ except Exception as e:
355
+ s += f"**Warning, No hostname: {e}"
356
+ return s
357
+ #
358
+ # fetch files name
359
+ def fetch_file_names(self,directory, file_extension=None):
360
+ """
361
+ This function gets all the filenames with a given extension.
362
+ Args:
363
+ directory (str):
364
+ directory path to scan for files in.
365
+ file_extension (list):
366
+ file extension to look for or "None" (default) to get all files.
367
+ Returns:
368
+ filenames (list):
369
+ list of strings containing the filenames with the given extension.
370
+ """
371
+ filenames = []
372
+ for (root, subFolders, files) in os.walk(directory):
373
+ for fname in files:
374
+ if (file_extension is None):
375
+ filenames.append(os.path.join(root, fname))
376
+ else:
377
+ for ext in file_extension:
378
+ if fname.endswith(ext):
379
+ filenames.append(os.path.join(root, fname))
380
+ return filenames
381
+ #
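A quick usage sketch of fetch_file_names (the directory and extensions are illustrative):
    # fnames = monty.fetch_file_names("img_colab/", [".png", ".jpg"])
    # print(len(fnames), "image files found")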
382
+ # fetch the crypto key
383
+ def _fetch_crypt(self,has_new_key=False):
384
+
385
+ """
386
+ This function fetches the crypto key from the file or from the
387
+ variable created previously in the class.
388
+ Args:
389
+ has_new_key (bool):
390
+ flag to indicate whether the stored key should be
391
+ used as-is (False) or re-read from the key file (True).
392
+ Returns:
393
+ s (str):
394
+ string value containing the crypto key.
395
+ """
396
+ if self._fkey == 'your_key_goes_here':
397
+ raise Exception('Crypto key is not correct!')
398
+ #
399
+ s=self._fkey[::-1]
400
+ if (has_new_key):
401
  s=open(self._xkeyfile, "rb").read()
402
+ self._fkey = s # the key file already stores the key reversed
+ s = s[::-1] # un-reverse it for immediate use
403
  return s
404
  #
405
+ # generate new crypto key
406
+ def gen_key(self):
407
+ """
408
+ This function generates a new crypto key and saves it to a file.
409
+
410
+ Args:
411
+ None
412
+
413
+ Returns:
414
+ (str) crypto key
415
+ """
416
+
417
  key = cryptography.fernet.Fernet.generate_key()
418
  with open(self._xkeyfile, "wb") as key_file:
419
+ key_file.write(key[::-1]) # write the key reversed (light obfuscation)
420
+ return key
421
  #
422
+ # decrypt message
423
+ def decrypt_it(self, x):
424
+ """
425
+ Decrypts the encrypted string using the stored crypto key.
426
+
427
+ Args:
428
+ x: (str) to be decrypted.
429
+
430
+ Returns:
431
+ x: (str) decrypted version of x.
432
+ """
433
  y = self._fetch_crypt()
434
  f = cryptography.fernet.Fernet(y)
435
  m = f.decrypt(x)
436
  return m.decode()
437
  #
438
+ # encrypt message
439
+ def encrypt_it(self, x):
440
+ """
441
+ encrypt message
442
+
443
+ Args:
444
+ x (str): message to encrypt
445
+
446
+ Returns:
447
+ str: encrypted message
448
+ """
449
+
450
  key = self._fetch_crypt()
451
  p = x.encode()
452
  f = cryptography.fernet.Fernet(key)
453
  y = f.encrypt(p)
454
  return y
455
  #
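A minimal round-trip sketch for the crypto helpers above, assuming a fresh key file; note that gen_key() stores the key reversed and _fetch_crypt() un-reverses it:
    # monty.gen_key()                          # writes a reversed Fernet key to .xoxo
    # monty._fkey = "placeholder"              # any non-default value; replaced on the next line
    # monty._fetch_crypt(has_new_key=True)     # reloads the key from the file
    # token = monty.encrypt_it("hello pluto")  # -> bytes
    # print(monty.decrypt_it(token))           # -> "hello pluto"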
456
+ # fetch import libraries
457
+ def _fetch_lib_import(self):
458
+
459
+ """
460
+ This function fetches all the imported libraries that are installed.
461
+
462
+ Args:
463
+ None
464
+
465
+ Returns:
466
+ x (list):
467
+ list of strings containing the name of the imported libraries.
468
+ """
469
+
470
+ x = []
471
+ for name, val in globals().items():
472
+ if isinstance(val, types.ModuleType):
473
+ x.append(val.__name__)
474
+ x.sort()
475
+ return x
476
+ #
477
+ # fetch lib version
478
+ def _fetch_lib_version(self,lib_name):
479
+
480
+ """
481
+ This function fetches the version of the imported libraries.
482
+
483
+ Args:
484
+ lib_name (list):
485
+ list of strings containing the name of the imported libraries.
486
+
487
+ Returns:
488
+ val (list):
489
+ list of strings containing the version of the imported libraries.
490
+ """
491
+
492
+ val = []
493
+ for x in lib_name:
494
+ try:
495
+ y = importlib.metadata.version(x)
496
+ val.append(f'{x}=={y}')
497
+ except Exception as e:
498
+ val.append(f'|{x}==unknown_*or_system')
499
+ val.sort()
500
+ return val
501
+ #
502
+ # fetch the lib name and version
503
+ def fetch_info_lib_import(self):
504
+ """
505
+ This function fetches all the imported libraries name and version that are installed.
506
+
507
+ Args:
508
+ None
509
+
510
+ Returns:
511
+ x (list):
512
+ list of strings containing the name and version of the imported libraries.
513
+ """
514
+ x = self._fetch_lib_version(self._fetch_lib_import())
515
+ return x
516
+ #
517
+ # write a file to local or cloud diskspace
518
+ def write_file(self,fname, in_data):
519
+
520
+ """
521
+ Write a file to local or cloud diskspace or append to it if it already exists.
522
+
523
+ Args:
524
+ fname (str): The name of the file to write.
525
+ in_data (list): The lines of text to write to the file.
526
+
527
+ The file is opened (in append mode if it already exists), the text
528
+ lines are written joined by newlines, and the file is closed.
+
535
+ Returns:
536
+ None
537
+ """
538
+
539
+ if os.path.isfile(fname):
540
+ f = open(fname, "a")
541
+ else:
542
+ f = open(fname, "w")
543
+ f.writelines("\n".join(in_data))
544
+ f.close()
545
+ return
546
+ #
547
+ # fetch flops info
548
+ def fetch_info_flops(self,model, input_shape=(1, 3, 224, 224), device="cpu", max_epoch=1):
549
+
550
+ """
551
+ Calculates the number of floating point operations (FLOPs).
552
+
553
+ Args:
554
+ model (torch.nn.Module): neural network model.
555
+ input_shape (tuple): input tensor size.
556
+ device (str): device to perform computation on.
557
+ max_epoch (int): number of timed runs to average over.
558
+
559
+ Returns:
560
+ (float): number of FLOPs, average from epoch, default is 1 epoch.
561
+ (float): elapsed seconds
562
+ (list): of string for a friendly human readable output
563
+ """
564
+
565
+ ttm_input = torch.rand(input_shape, dtype=torch.float32, device=device)
566
+ # ttm_input = torch.rand((1, 3, 224, 224), dtype=torch.float32, device=device)
567
+ tstart = time.time()
568
+ for i in range(max_epoch):
569
+ flops, params = flopth(model, inputs=(ttm_input,), bare_number=True)
570
+ tend = time.time()
571
+ etime = (tend - tstart)/max_epoch
572
+
573
+ # kilo = 10^3, mega = 10^6, giga = 10^9, tera=10^12, peta=10^15, exa=10^18, zetta=10^21
574
+ valstr = []
575
+ valstr.append(f'Tensors device: {device}')
576
+ valstr.append(f'flops: {flops:,}')
577
+ valstr.append(f'params: {params:,}')
578
+ valstr.append(f'epoch: {max_epoch}')
579
+ valstr.append(f'sec: {etime}')
580
+ # valstr += f'Tensors device: {device}, flops: {flops}, params: {params}, epoch: {max_epoch}, sec: {etime}\n'
581
+ x = flops/etime
582
+ y = (x/10**15)*86400
583
+ valstr.append(f'Flops/s: {x:,}')
584
+ valstr.append(f'PetaFlops/s: {x/10**15}')
585
+ valstr.append(f'PetaFlops/day: {y}')
586
+ valstr.append(f'1 PetaFlopsDay (on this system will take): {round(1/y, 2):,.2f} days')
587
+ return flops, etime, valstr
588
+ #
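As a sanity check on the PetaFlops arithmetic above, plugging in the class's stored Google Colab CPU rate:
    # x = 4_887_694_725          # flops/sec (self.flops_per_sec_gcolab_cpu)
    # y = (x / 10**15) * 86400   # ~= 0.42 PetaFlops/day
    # days = 1 / y               # ~= 2.37 days to reach one PetaFlopsDay on that CPU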
589
+ def print_petaflops(self):
590
+
591
+ """
592
+ Prints the flops and peta-flops-day calculation.
593
+ **WARNING**: This method will break/interfere with Stable Diffusion's use of LoRA.
594
+ I can't debug why yet.
595
+
596
+ Args:
597
+ None
598
+
599
+ Returns:
600
+ None
601
+ """
602
+ self._pp('Model', 'TTM, Tiny Torch Model on: CPU')
603
+ mtoy = TTM()
604
+ # my_model = MyModel()
605
+ dev = torch.device("cuda:0")
606
+ a,b,c = self.fetch_info_flops(mtoy)
607
+ y = round((a/b)/self.flops_per_sec_gcolab_cpu * 100, 2)
608
+ self._pp('Flops', f'{a:,} flops')
609
+ self._pp('Total elapsed time', f'{b:,} seconds')
610
+ self._pp('Flops compared', f'{y:,}% of Google Colab Pro')
611
+ for i, val in enumerate(c):
612
+ self._pp(f'Info {i}', val)
613
  self._ph()
614
+
615
+ try:
616
+ self._pp('Model', 'TTM, Tiny Torch Model on: GPU')
617
+ dev = torch.device("cuda:0")
618
+ a2,b2,c2 = self.fetch_info_flops(mtoy, device=dev)
619
+ y2 = round((a2/b2)/self.flops_per_sec_gcolab_gpu * 100, 2)
620
+ self._pp('Flops', f'{a2:,} flops')
621
+ self._pp('Total elapsed time', f'{b2:,} seconds')
622
+ self._pp('Flops compared', f'{y2:,}% of Google Colab Pro')
623
+ d2 = round(((a2/b2)/(a/b))*100, 2)
624
+ self._pp('Flops GPU compared', f'{d2:,}% of CPU (or {round(d2-100,2):,}% faster)')
625
+ for i, val in enumerate(c2):
626
+ self._pp(f'Info {i}', val)
627
+ except Exception as e:
628
+ self._pp('Error', e)
629
+ self._ph()
630
  return
631
  #
632
+ #
633
+ def fetch_installed_libraries(self):
634
+ """
635
+ Retrieves and prints the names and versions of Python libraries installed by the user,
636
+ excluding the standard libraries.
637
 
638
+ Args:
639
+ -----
640
+ None
641
+
642
+ Returns:
643
+ --------
644
+ dictionary: (dict)
645
+ A dictionary where keys are the names of the libraries and values are their respective versions.
646
+
647
+ Examples:
648
+ ---------
649
+ libraries = self.fetch_installed_libraries()
650
+ for name, version in libraries.items():
651
+ print(f"{name}: {version}")
652
+ """
653
+ # Run "pip freeze" to get the list of installed packages with their versions
654
+ # (this reports pip-installed packages, not standard-library modules)
655
+ result = subprocess.run(['pip', 'freeze'], stdout=subprocess.PIPE)
656
+
657
+ # Decode result and split by lines
658
+ packages = result.stdout.decode('utf-8').splitlines()
659
+
660
+ # Split each line by '==' to separate package names and versions
661
+ installed_libraries = {}
662
+ for package in packages:
663
+ try:
664
+ name, version = package.split('==')
665
+ installed_libraries[name] = version
666
+ except Exception as e:
667
+ #print(f'{package}: Error: {e}')
668
+ pass
669
+ return installed_libraries
670
  #
671
  #
672
+ def fetch_match_file_dict(self, file_path, reference_dict):
673
  """
674
+ Reads a file from the disk, creates an array with each line as an item,
675
+ and checks if each line exists as a key in the provided dictionary. If it exists,
676
+ the associated value from the dictionary is also returned.
677
 
678
  Parameters:
679
+ -----------
680
+ file_path: str
681
+ Path to the file to be read.
682
+ reference_dict: dict
683
+ Dictionary against which the file content (each line) will be checked.
684
 
685
  Returns:
686
+ --------
687
+ dict:
688
+ A dictionary where keys are the lines from the file and values are either
689
+ the associated values from the reference dictionary or None if the key
690
+ doesn't exist in the dictionary.
691
+
692
+ Raises:
693
+ -------
694
+ FileNotFoundError:
695
+ If the provided file path does not exist.
696
+ """
697
+
698
+ if not os.path.exists(file_path):
699
+ raise FileNotFoundError(f"The file at {file_path} does not exist.")
700
+
701
+ with open(file_path, 'r') as file:
702
+ lines = file.readlines()
703
+
704
+ # Check if each line (stripped of whitespace and newline characters) exists in the reference dictionary.
705
+ # If it exists, fetch its value. Otherwise, set the value to None.
706
+ results = {line.strip(): reference_dict.get(line.strip().replace('_', '-'), None) for line in lines}
707
+
708
+ return results
709
+ # print fetch_info about myself
710
+ def print_info_self(self):
711
+
712
+ """
713
+ Prints information about the model/myself.
714
+
715
+ Args:
716
+ None
717
+
718
+ Returns:
719
+ None
720
+ """
721
+
722
+ self._ph()
723
+ self._pp("Hello, I am", self.name)
724
+ self._pp("I will display", "Python, Jupyter, and system info.")
725
+ self._pp("For complete doc type", "help(pluto) ...or help(your_object_name)")
726
+ self._pp('.','.')
727
+ self._pp("...", "¯\_(ツ)_/¯")
728
+ self._ph()
729
+ # system
730
+ self._pp('System', 'Info')
731
+ x = self.fetch_info_system()
732
+ print(x)
733
+ self._ph()
734
+ # gpu
735
+ self._pp('GPU', 'Info')
736
+ x = self.fetch_info_gpu()
737
+ print(x)
738
+ self._ph()
739
+ # lib used
740
+ self._pp('Installed lib from', self.fname_requirements)
741
+ self._ph()
742
+ x = self.fetch_match_file_dict(self.fname_requirements, self.fetch_installed_libraries())
743
+ for item, value in x.items():
744
+ self._pp(f'{item} version', value)
745
+ self._ph()
746
+ self._pp('Standard lib from', 'System')
747
+ self._ph()
748
+ self._pp('matplotlib version', matplotlib.__version__)
749
+ self._pp('numpy version', numpy.__version__)
750
+ self._pp('pandas version',pandas.__version__)
751
+ self._pp('PIL version', PIL.__version__)
752
+ self._pp('torch version', torch.__version__)
753
+ self._ph()
754
+ # host ip
755
+ self._pp('Host', 'Info')
756
+ x = self.fetch_info_host_ip()
757
+ print(x)
758
+ self._ph()
759
+ #
760
  return
761
  #
762
+ #
763
+ # define TTM for use in calculating flops
764
+ class TTM(torch.nn.Module):
765
+
766
+ """
767
+ Tiny Torch Model (TTM)
768
+
769
+ This is a toy model consisting of four convolutional layers.
770
+
771
+ Args:
772
+ input_shape (tuple): input tensor size.
773
+
774
+ Returns:
775
+ (tensor): output of the model.
776
+ """
777
+
778
+ def __init__(self, input_shape=(1, 3, 224, 224)):
779
+ super(TTM, self).__init__()
780
+ self.conv1 = torch.nn.Conv2d(3, 3, kernel_size=3, padding=1)
781
+ self.conv2 = torch.nn.Conv2d(3, 3, kernel_size=3, padding=1)
782
+ self.conv3 = torch.nn.Conv2d(3, 3, kernel_size=3, padding=1)
783
+ self.conv4 = torch.nn.Conv2d(3, 3, kernel_size=3, padding=1)
784
+
785
+ def forward(self, x1):
786
+ x1 = self.conv1(x1)
787
+ x1 = self.conv2(x1)
788
+ x1 = self.conv3(x1)
789
+ x1 = self.conv4(x1)
790
+ return x1
791
+ #
792
+ # (end of class TTM)
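A quick shape check of TTM, runnable as-is once the class above is defined; every conv maps 3 channels to 3 with padding=1, so height and width are preserved:
    ttm = TTM()
    out = ttm(torch.rand((1, 3, 224, 224)))
    print(out.shape)   # torch.Size([1, 3, 224, 224])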
793
  # add module/method
794
  #
795
  import functools
 
802
  return func # returning func means func can still be used normally
803
  return decorator
804
  #
805
+ # [END OF pluto_happy]
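The body of add_method is elided by the diff above; here is a minimal sketch consistent with the visible "return func" / "return decorator" lines (the functools.wraps wrapper is an assumption, not confirmed by this diff):
    # import functools
    # def add_method(cls):
    #     def decorator(func):
    #         @functools.wraps(func)
    #         def wrapper(*args, **kwargs):
    #             return func(*args, **kwargs)
    #         setattr(cls, func.__name__, wrapper)   # graft func onto cls
    #         return func    # returning func means func can still be used normally
    #     return decorator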
806
+ ## %%write app.py
807
+ import openai
808
+ import gradio
809
+ # %%write -a app.py
810
+
811
+ # wake up monty
812
+ monty = Pluto_Happy('Monty, shares or steal')
813
+ # %%write -a app.py
814
+
815
+ # check out my environments
816
+
817
+ # monty.fname_requirements = 'pluto_happy/requirements.txt'
818
+ # monty.print_info_self()
819
+ # %%write -a app.py
820
+
821
+ monty._huggingface_key=b'gAAAAABld_3fKLl7aPBJzfAq-th37t95pMu2bVbH9QccOSecaUnm33XrpKpCXP4GL6Wr23g3vtrKWli5JK1ZPh18ilnDb_Su6GoVvU92Vzba64k3gBQwKF_g5DoH2vWq2XM8vx_5mKJh'
822
+ monty._kaggle_key=b'gAAAAABld_4_B6rrRhFYyfl77dacu1RhR4ktaLU6heYhQBSIj4ELBm7y4DzU1R8-H4yPKd0w08s11wkFJ9AR7XyESxM1SsrMBzqQEeW9JKNbl6jAaonFGmqbhFblkQqH4XjsapZru0qX'
823
+ monty._fkey="fes_f8Im569hYnI1Tn6FqP-6hS4rdmNOJ6DWcRPOsvc="
824
+ monty._fkey=monty._fkey[::-1]
825
+ monty._ok=b'gAAAAABld_-y70otUll4Jwq3jEBXiw1tooSFo_gStRbkCyuu9_Dmdehc4M8lI_hFbum9CwyZuj9ZnXgxFIROebcPSF5qoA197VRvzUDQOMxY5zmHnImVROrsXVdZqXyIeYH_Q6cvXvFTX3rLBIKKWgvJmnpYGRaV6Q=='
826
+
827
  # %%write -a app.py
828
 
829
  # client.moderations.create()
830
+ ai_client = openai.OpenAI(api_key=monty.decrypt_it(monty._ok))
831
+ # %%write -a app.py
832
 
833
+ fname = 'toxic_data.csv'
834
+ monty.df_toxic_data = pandas.read_csv(fname)
835
+ # %%writefile -a app.py
836
  #
837
  # # for openai version 1.3.8
838
+ @add_method(Pluto_Happy)
839
+ #
840
  def _fetch_moderate_engine(self):
841
+ self.ai_client = openai.OpenAI(api_key=self.decrypt_it(self._ok))
842
  self.text_model = "text-moderation-latest"
843
  return
844
  #
845
+ @add_method(Pluto_Happy)
846
+ #
847
  def _censor_me(self, p, safer=0.0005):
848
  self._fetch_moderate_engine()
849
  resp_orig = self.ai_client.moderations.create(input=p, model=self.text_model)
865
  v1['message'] = p
866
  return v1
867
  #
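For reference, a direct call equivalent to what _censor_me wraps (openai>=1.x SDK; model_dump() assumes its pydantic-v2 response objects):
    # resp = monty.ai_client.moderations.create(input="some message",
    #                                           model="text-moderation-latest")
    # result = resp.results[0]
    # print(result.flagged)                          # overall safe/unsafe verdict
    # scores = result.category_scores.model_dump()   # per-category scores as a dict
    # print(max(scores, key=scores.get))             # top-scoring category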
868
+ @add_method(Pluto_Happy)
869
  def _draw_censor(self,data):
870
  self._color_mid_gray = '#6c757d'
871
  exp = (0.01, 0.01)
872
+ x = [data['max_value'], (1-data['max_value'])]
873
+ title=f"\nUnsafe: {data['max_key']}: {(data['max_value']*100):.2f}% Confidence\n"
874
+ lab = [data['max_key'], 'Other 13 categories']
875
  if (data['is_flagged']):
876
+ col=[self.color_danger, self.color_mid_gray]
877
  elif (data['is_safer_flagged']):
878
+ col=[self.color_warning, self.color_mid_gray]
879
+ lab = ['Relative Score:\n'+data['max_key'], 'Other 13 categories']
880
+ title=f"\nPersonal Unsafe: {data['max_key']}: {(data['max_value']*100):.2f}% Confidence\n"
881
  else:
882
+ col=[self.color_mid_gray, self.color_success]
883
+ lab = ['False Negative:\n'+data['max_key'], 'Other 13 categories']
884
+ title='\nSafe Message\n'
885
  canvas = self._draw_donut(x, lab, col, exp,title)
886
  return canvas
887
  #
888
+ @add_method(Pluto_Happy)
889
  def _draw_donut(self,data,labels,col, exp,title):
890
+ # col = [self.color_danger, self._color_secondary]
891
  # exp = (0.01, 0.01)
892
  # Create a pie chart
893
  canvas, pic = matplotlib.pyplot.subplots()
910
  # canvas.show()
911
  return canvas
912
  #
913
+ @add_method(Pluto_Happy)
914
+ # def censor_me(self, msg, safer=0.02, ibutton_1=0):
915
+ def censor_me(self, msg, safer):
916
+ # safer=0.2
917
  yjson = self._censor_me(msg,safer)
918
  _canvas = self._draw_censor(yjson)
919
  _yjson = json.dumps(yjson, indent=4)
920
+ # return (_canvas, _yjson)
921
+ return(_canvas)
922
+ # %%write -a app.py
923
+ # result from a lot of AI prompting and old-fashioned trial and error
924
 
925
+ # print(gradio.__version__)
926
+ import random
927
+
928
+ def say_hello(val):
929
+ return f"Hello: {val}"
930
+ def say_toxic():
931
+ return f"I am toxic"
932
+ def fetch_toxic_tweets(maxi=2):
933
+ sample_df = monty.df_toxic_data.sample(maxi)
934
+ is_true = random.choice([True, False])
935
+ c1 = "more_toxic"
936
+ if is_true:
937
+ c1 = "less_toxic"
938
+ toxic1 = sample_df[c1].iloc[0]
939
+ # toxic1 = "cat eats my homework."
940
+ return sample_df.to_html(index=False), toxic1
941
  #
942
+ # define all gradio widgets/components outside the Blocks scope to make the Blocks structure easier to visualize
943
+ #
944
+ in1 = gradio.Textbox(lines=3, label="Enter Text:")
945
+ in2 = gradio.Slider(0.005, .1, value=0.02, step=.005,label="Personalized Safer Value: (a larger value is less safe)")
946
+ out1 = gradio.Plot(label="Output:")
947
+ out2 = gradio.HTML(label="Real-world Toxic Posts/Tweets: *WARNING")
948
+ out3 = gradio.Textbox(lines=5, label="Output JSON:")
949
+ but1 = gradio.Button("Measure 14 Toxicity", variant="primary",size="sm")
950
+ but2 = gradio.Button("Fetch Toxic Text", variant="stop", size="sm")
951
+ #
952
+ txt1 = """
953
+ # 😃 Welcome to Friendly Text Moderation
954
+
955
+ ### Identify 14 categories of text toxicity.
956
+
957
+ >The purpose of this NLP (Natural Language Processing) AI demonstration is to prevent profanity, vulgarity, hate speech, violence, sexism, and any other offensive language.
958
+ >It is **not an act of censorship**: the final UI (User Interface) gives the reader (though not a young reader) the option to click on a label and read the toxic message.
959
+ >The goal is to create a safer and more respectful environment for you, your colleagues, and your family.
960
+ ---
961
+ ### 🌴 Helpful Instructions:
962
+
963
+ 1. Enter your [harmful] message in the input box.
964
+
965
+ 2. Click the "Measure 14 Toxicity" button.
966
+ 3. View the result on the Donut plot.
967
+ 4. (**Optional**) Click the "Fetch Toxic Text" button below to load real-world examples.
968
+ 5. Please find below the explanation of additional options available.
969
+ """
970
+ txt2 = """
971
+ ## 🌻 Author and Developer Notes:
972
+ ---
973
+ - The demo uses the cutting-edge (2024) AI Natural Language Processing (NLP) model from OpenAI.
974
+ - It is not a Generative (GenAI) model, such as Google Gemini or GPT-4.
975
+ - The NLP understands the message context, nuance, innuendo, and not just swear words.
976
+ - We **challenge you** to trick it, i.e., write a toxic tweet or post that our AI thinks is safe. If you win, please send us your message.
977
+ - The 14 toxicity categories are as follows:
978
+
979
+ 1. harassment
980
+ 2. harassment threatening
981
+ 3. harassment instructions
982
+ 4. hate
983
+ 5. hate threatening
984
+ 6. hate instructions
985
+ 7. self harm
986
+ 8. self harm instructions
987
+ 9. self harm intent
988
+ 10. self harm minor
989
+ 11. sexual
990
+ 12. sexual minors
991
+ 13. violence
992
+ 14. violence graphic
993
+
994
+ - If the NLP model classifies the message as "safe," you can still limit the level of toxicity by using the "Personal Safe" slider.
995
+ - The smaller the personal-safe value, the stricter the limitation. It means that if you're a young or sensitive adult, you should choose a lower personal-safe value, less than 0.02, to ensure you're not exposed to harmful content.
996
+ - The color of the donut plot is as follows:
997
+ - Red is an "unsafe" message by the NLP model
998
+ - Green is a "safe" message
999
+ - Yellow is an "unsafe" message by your toxicity level
1000
+
1001
+ - The real-world dataset is from the Jigsaw Rate Severity of Toxic Comments on Kaggle. It has 30,108 records.
1002
+ - The intent is to share with Duc's friends and colleagues, but for those with nefarious intent, this Text Moderation model is governed by the GNU 3.0 License: https://www.gnu.org/licenses/gpl-3.0.en.html
1003
+ - Author: **Duc Haba, 2024**
1004
+ """
1005
+ txt3 = """
1006
+ ## 💥 WARNING:
1007
+ ---
1008
 
1009
+ - The following button will retrieve **real-world** offensive posts from Twitter and customer reviews from consumer companies.
1010
+ - The button will display four toxic messages at a time. **Click again** for four more random messages.
1011
+ - They contain **profanity, vulgarity, hate, violence, sexism, and other offensive language.**
1012
+ - After you fetch the toxic messages, Click on the **"Measure 14 Toxicity" button**.
1013
+ """
1014
+ #reverse_button.click(process_text, inputs=text_input, outputs=reversed_text)
1015
+ #
1016
+
1017
+ with gradio.Blocks() as gradio_app:
1018
+ # title
1019
+ gradio.Markdown(txt1) # any html or simple mark up
1020
+ #
1021
+ # first row, has two columns 1/3 size and 2/3 size
1022
+ with gradio.Row(): # items inside rows are columns
1023
+ # left column
1024
+ with gradio.Column(scale=1): # items inside columns are rows; scale 1 is 1/3 of the width
1025
+ # left column has two rows, text entry, and buttons
1026
+ in1.render()
1027
+ in2.render()
1028
+ but1.render()
1029
+ but1.click(monty.censor_me, inputs=[in1, in2], outputs=out1)
1030
+
1031
+ with gradio.Column(scale=2):
1032
+ out1.render()
1033
+ #
1034
+ # second row is warning text
1035
+ with gradio.Row():
1036
+ gradio.Markdown(txt3)
1037
+
1038
+ # third row is fetching toxic data
1039
+ with gradio.Row():
1040
+ with gradio.Column(scale=1):
1041
+ but2.render()
1042
+ but2.click(fetch_toxic_tweets, inputs=None, outputs=[out2, in1])
1043
+ with gradio.Column(scale=2):
1044
+ out2.render()
1045
+
1046
+ # fourth row is note text
1047
+ with gradio.Row():
1048
+ gradio.Markdown(txt2)
1049
+ # %%write -a app.py
1050
+ # open/launch it
1051
+ gradio_app.launch()