InfosysResponsibleAiToolKit committed
Commit d1b3684 · Parent: 4984f6e
Files changed (38)
  1. src/.env +10 -0
  2. src/config/__init__.py +0 -0
  3. src/config/config.py +36 -0
  4. src/config/logger.py +166 -0
  5. src/constants/__init__.py +0 -0
  6. src/constants/__pycache__/__init__.cpython-311.pyc +0 -0
  7. src/constants/__pycache__/__init__.cpython-39.pyc +0 -0
  8. src/constants/__pycache__/local_constants.cpython-311.pyc +0 -0
  9. src/constants/__pycache__/local_constants.cpython-39.pyc +0 -0
  10. src/constants/local_constants.py +27 -0
  11. src/dao/AdminDb.py +76 -0
  12. src/dao/__pycache__/AdminDb.cpython-311.pyc +0 -0
  13. src/dao/__pycache__/AdminDb.cpython-39.pyc +0 -0
  14. src/dao/temp.txt +24 -0
  15. src/exception/__init__.py +0 -0
  16. src/exception/__pycache__/__init__.cpython-311.pyc +0 -0
  17. src/exception/__pycache__/__init__.cpython-39.pyc +0 -0
  18. src/exception/__pycache__/exception.cpython-311.pyc +0 -0
  19. src/exception/__pycache__/exception.cpython-39.pyc +0 -0
  20. src/exception/exception.py +48 -0
  21. src/exestep.txt +10 -0
  22. src/logger.ini +5 -0
  23. src/logs/rai/responsible-ai-servicelogs_20240221_174013.log +0 -0
  24. src/logs/rai/responsible-ai-servicelogs_20240221_174501.log +0 -0
  25. src/main.py +58 -0
  26. src/main_test.py +494 -0
  27. src/mapper/__init__.py +1 -0
  28. src/mapper/__pycache__/__init__.cpython-39.pyc +0 -0
  29. src/mapper/__pycache__/mapper.cpython-39.pyc +0 -0
  30. src/mapper/mapper.py +31 -0
  31. src/project/__init__.py +0 -0
  32. src/routing/__init__.py +0 -0
  33. src/routing/router.py +307 -0
  34. src/routing/safety_router.py +53 -0
  35. src/service/__init__.py +0 -0
  36. src/service/safety_service.py +35 -0
  37. src/service/service.py +364 -0
  38. src/static/swagger.json +710 -0
src/.env ADDED
@@ -0,0 +1,10 @@
+ workers=1
+
+ WORKERS="${workers}"
+ # DB_NAME="${dbname}"
+ # DB_USERNAME="${username}"
+ # DB_PWD="${password}"
+ # DB_IP="${ipaddress}"
+ # DB_PORT="${port}"
+ # MONGO_PATH="mongodb://${DB_USERNAME}:${DB_PWD}@${DB_IP}:${DB_PORT}/"
+ # MONGO_PATH= "mongodb://localhost:27017/"
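These values are read at process start through python-dotenv (see src/dao/AdminDb.py below). A minimal usage sketch, assuming one of the commented MONGO_PATH lines has been uncommented locally; otherwise MONGO_PATH resolves to None:

# Sketch only: how the .env entries above surface in Python code.
import os
from dotenv import load_dotenv

load_dotenv()                              # loads src/.env from the working directory
workers = int(os.getenv("WORKERS", "1"))   # populated from the workers entry above
mongo_path = os.getenv("MONGO_PATH")       # e.g. "mongodb://localhost:27017/" if enabled
print(workers, mongo_path)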
src/config/__init__.py ADDED
File without changes
src/config/config.py ADDED
@@ -0,0 +1,36 @@
+ '''
+ Copyright 2024-2025 Infosys Ltd.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ '''
+
+ from configparser import ConfigParser
+ import yaml
+
+
+ def readConfig(section,filename):
+     # create a parser
+     parser = ConfigParser()
+     # read config file
+     parser.read(filename)
+
+     # get section, default to postgresql
+     db = {}
+     if parser.has_section(section):
+         params = parser.items(section)
+         for param in params:
+             db[param[0]] = param[1]
+     else:
+         raise Exception('Section {0} not found in the {1} file'.format(section, filename))
+
+     return db
+
+
+ def read_config_yaml(filename):
+     with open(filename) as config_file:
+         config_details = yaml.safe_load(config_file)
+     return config_details
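A short usage sketch for readConfig, the same call CustomLogger makes below. Note that ConfigParser lower-cases option names, so the keys of the returned dict are 'file_name', 'log_dir', and so on; the path here is illustrative:

# Sketch only: reading the [logDetails] section of a logger.ini-style file.
from config.config import readConfig

log_params = readConfig("logDetails", "logger.ini")   # illustrative path
# ConfigParser lower-cases option names, so FILE_NAME / LOG_DIR come back as:
print(log_params["file_name"], log_params["log_dir"])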
src/config/logger.py ADDED
@@ -0,0 +1,166 @@
+ '''
+ Copyright 2024-2025 Infosys Ltd.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ '''
+
+ import datetime
+ import logging
+ import os
+ import sys
+ from .config import readConfig
+ import contextvars
+
+ request_id_var = contextvars.ContextVar("request_id_var")
+
+ class CustomLogger(logging.getLoggerClass()):
+     def __init__(self):
+         """Create a custom logger with the specified `name`. When `log_dir` is None, a simple
+         console logger is created. Otherwise, a file logger is created in addition to the console
+         logger.
+
+         By default, the five standard logging levels (DEBUG through CRITICAL) only display
+         information in the log file if a file handler is added to the logger, but **not** to the
+         console.
+         :param name: name for the logger
+         :param verbose: bool: whether the logging should be verbose; if True, then all messages get
+             logged both to stdout and to the log file (if `log_dir` is specified); if False, then
+             messages only get logged to the log file (if `log_dir` is specified)
+         :param log_dir: str: (optional) the directory for the log file; if not present, no log file
+             is created
+         """
+         # Create custom logger logging all five levels
+         BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+         log_cfg_path = os.path.join(BASE_DIR, 'logger.ini')
+         log_params = readConfig('logDetails', log_cfg_path)
+         name = log_params['file_name']
+         try:
+             verbose = bool(log_params['verbose'])
+         except:
+             verbose = False
+
+         log_dir = str(log_params['log_dir'])
+
+         super().__init__(name)
+         self.setLevel(logging.DEBUG)
+
+         # Add new logging level
+         logging.addLevelName(logging.INFO, 'INFO')
+
+         # Determine verbosity settings
+         self.verbose = verbose
+
+         # Create stream handler for logging to stdout (log all five levels)
+         self.stdout_handler = logging.StreamHandler(sys.stdout)
+         self.stdout_handler.setLevel(logging.DEBUG)
+         self.stdout_handler.setFormatter(logging.Formatter('%(message)s'))
+         self.enable_console_output()
+
+         self.file_handler = None
+         if log_dir:
+             self.add_file_handler(name, log_dir)
+
+     def add_file_handler(self, name, log_dir):
+         """Add a file handler for this logger with the specified `name` (and store the log file
+         under `log_dir`)."""
+         # Format for file log
+         fmt = '%(asctime)s | %(levelname)9s | %(filename)s:%(lineno)d | %(user_id)s | %(message)s '
+         formatter = logging.Formatter(fmt)
+
+         # Determine log path and file name; create log path if it does not exist
+         now = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
+         log_name = f'{str(name).replace(" ", "_")}_{now}'
+         if not os.path.exists(log_dir):
+             try:
+                 os.makedirs(log_dir)
+             except:
+                 print(f'{self.__class__.__name__}: Cannot create directory {log_dir}. ',
+                       end='', file=sys.stderr)
+                 log_dir = '/tmp' if sys.platform.startswith('linux') else '.'
+                 print(f'Defaulting to {log_dir}.', file=sys.stderr)
+
+         log_file = os.path.join(log_dir, log_name) + '.log'
+
+         # Create file handler for logging to a file (log all five levels)
+         self.file_handler = logging.FileHandler(log_file)
+         self.file_handler.setLevel(logging.DEBUG)
+         self.file_handler.setFormatter(formatter)
+         self.addHandler(self.file_handler)
+
+     def has_console_handler(self):
+         return len([h for h in self.handlers if type(h) == logging.StreamHandler]) > 0
+
+     def has_file_handler(self):
+         return len([h for h in self.handlers if isinstance(h, logging.FileHandler)]) > 0
+
+     def disable_console_output(self):
+         if not self.has_console_handler():
+             return
+         self.removeHandler(self.stdout_handler)
+
+     def enable_console_output(self):
+         if self.has_console_handler():
+             return
+         self.addHandler(self.stdout_handler)
+
+     def disable_file_output(self):
+         if not self.has_file_handler():
+             return
+         self.removeHandler(self.file_handler)
+
+     def enable_file_output(self):
+         if self.has_file_handler():
+             return
+         self.addHandler(self.file_handler)
+
+     def framework(self, msg, *args, **kwargs):
+         """Logging method for the FRAMEWORK level. The `msg` gets logged both to stdout and to file
+         (if a file handler is present), irrespective of verbosity settings."""
+         return super().info(msg, *args, **kwargs)
+
+     def _custom_log(self, func, msg, *args, **kwargs):
+         """Helper method for logging DEBUG through CRITICAL messages by calling the appropriate
+         `func()` from the base class."""
+         # Log normally if verbosity is on
+         if self.verbose:
+             return func(msg, *args, **kwargs)
+
+         # If verbosity is off and there is no file handler, there is nothing left to do
+         if not self.has_file_handler():
+             return
+
+         # If verbosity is off and a file handler is present, then disable stdout logging, log, and
+         # finally reenable stdout logging
+         self.disable_console_output()
+         func(msg, *args, **kwargs)
+         self.enable_console_output()
+
+     def getSeesionId():
+         try:
+             request_id = request_id_var.get()
+         except Exception as e:
+             request_id = "StartUp"
+         return request_id
+
+     def debug(self, msg, *args, **kwargs ):
+         self._custom_log(super().debug, str(datetime.datetime.now())+" : "+msg,extra = {'user_id':CustomLogger.getSeesionId()}, *args, **kwargs)
+
+     def info(self, msg, *args, **kwargs):
+         self._custom_log(super().info, str(datetime.datetime.now())+" : "+msg,extra = {'user_id':CustomLogger.getSeesionId()}, *args, **kwargs)
+
+     def warning(self, msg,user_id=None, *args, **kwargs):
+         self._custom_log(super().warning,str(datetime.datetime.now())+" : "+msg,extra = {'user_id':CustomLogger.getSeesionId()}, *args, **kwargs)
+
+     def error(self, msg,user_id=None, *args, **kwargs):
+         self._custom_log(super().error,str(datetime.datetime.now())+" : "+msg,extra = {'user_id':CustomLogger.getSeesionId()}, *args, **kwargs)
+
+     def critical(self, msg,user_id=None, *args, **kwargs):
+         self._custom_log(super().critical,str(datetime.datetime.now())+" : "+msg,extra = {'user_id':CustomLogger.getSeesionId()}, *args, **kwargs)
+
+
+ if __name__ == "__main__":
+     CustomLogger()
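A minimal usage sketch, assuming the logger.ini added later in this commit is resolvable next to the package and the configured LOG_DIR is writable (otherwise the handler falls back to /tmp on Linux or to the current directory):

# Sketch only: emitting records through the custom logger.
from config.logger import CustomLogger, request_id_var

log = CustomLogger()
request_id_var.set("demo-request-id")   # appears in the %(user_id)s field of file logs
log.info("service starting")
log.error("something went wrong")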
src/constants/__init__.py ADDED
File without changes
src/constants/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (201 Bytes).
src/constants/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (195 Bytes).
src/constants/__pycache__/local_constants.cpython-311.pyc ADDED
Binary file (673 Bytes).
src/constants/__pycache__/local_constants.cpython-39.pyc ADDED
Binary file (636 Bytes).
src/constants/local_constants.py ADDED
@@ -0,0 +1,27 @@
+ '''
+ Copyright 2024-2025 Infosys Ltd.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ '''
+
+ """
+ fileName: local_constants.py
+ description: Local constants for usecase module
+ """
+
+ DELTED_SUCCESS_MESSAGE="Successfully deleted the usecase :"
+ USECASE_ALREADY_EXISTS= "Usecase with name PLACEHOLDER_TEXT already exists"
+ USECASE_NOT_FOUND_ERROR="Usecase id PLACEHOLDER_TEXT Not Found"
+ USECASE_NAME_VALIDATION_ERROR="Usecase name should not be empty"
+ SPACE_DELIMITER=" "
+ PLACEHOLDER_TEXT="PLACEHOLDER_TEXT"
+ HTTP_STATUS_BAD_REQUEST=500
+ HTTP_STATUS_NOT_FOUND=404
+ HTTP_STATUS_409_CODE=409
+ HTTP_422_UNPROCESSABLE_ENTITY="422"
+ HTTP_415_UNSUPPORTED_MEDIA_TYPE=415
+ UNSUPPPORTED_MEDIA_TYPE_ERROR="Unsupported media type: "
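The message templates containing PLACEHOLDER_TEXT are filled by plain string substitution, the pattern used in src/exception/exception.py below:

# Illustration of how the placeholder constants above are consumed ("uc-123" is a made-up id).
from constants.local_constants import USECASE_NOT_FOUND_ERROR, PLACEHOLDER_TEXT

detail = USECASE_NOT_FOUND_ERROR.replace(PLACEHOLDER_TEXT, "uc-123")
print(detail)   # -> Usecase id uc-123 Not Found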
src/dao/AdminDb.py ADDED
@@ -0,0 +1,76 @@
+ '''
+ Copyright 2024-2025 Infosys Ltd.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ '''
+
+ import os
+ import pymongo
+
+ from dotenv import load_dotenv
+ import sys
+ load_dotenv()
+ import json
+ import requests
+ import urllib.parse
+
+ import requests
+ import json
+ import traceback
+ import os
+
+ import logging
+
+ class AttributeDict(dict):
+     __getattr__ = dict.__getitem__
+     __setattr__ = dict.__setitem__
+     __delattr__ = dict.__delitem__
+
+
+ myclient = pymongo.MongoClient(os.getenv("MONGO_PATH"))
+ print(myclient)
+ # dbname = os.getenv("APP_MONGO_DBNAME")
+ dbname = os.getenv("DB_NAME")
+
+ class DB:
+     def connect():
+         try:
+             # myclient = pymongo.MongoClient(os.getenv("MONGO_PATH"))
+             # mydb = myclient[os.getenv("DB_NAME")]
+             mydb = myclient[dbname]
+
+             return mydb
+         except Exception as e:
+             logging.error("error in DB connection")
+             logging.error(str(e))
+             sys.exit()
+
+ mydb=DB.connect()
+
+
+ class Results:
+     mycol = mydb["moderationtelemetrydata"]
+     logdb=mydb["Logdb"]
+     # mycol = mydb["Results"]
+     mycol2 = mydb["Results"]
+     # mycol2 = mydb["Resultswithfeedback"]
+
+
+     def createlog(value):
+
+         try:
+             print(value)
+             PtrnRecogCreatedData = Results.logdb.insert_one(value)
+             print("PtrnRecogCreatedData.acknowledged",PtrnRecogCreatedData.acknowledged)
+             return PtrnRecogCreatedData.acknowledged
+         except Exception as e:
+             logging.error("Error occured in Results create")
+             logging.error(f"Exception: {e}")
+
+
+
+
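Results.createlog takes a plain dict and returns the MongoDB insert acknowledgement. A hedged sketch, assuming MONGO_PATH and DB_NAME point at a reachable instance (the module connects at import time):

# Sketch only: persisting an error-log document through Results.createlog.
from dao.AdminDb import Results

logobj = {"_id": "demo-request-id", "error": [{"Error": "example", "Error Module": "demo"}]}
print(Results.createlog(logobj))   # True if the insert into the Logdb collection was acknowledged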
src/dao/__pycache__/AdminDb.cpython-311.pyc ADDED
Binary file (10.4 kB).
src/dao/__pycache__/AdminDb.cpython-39.pyc ADDED
Binary file (1.85 kB).
src/dao/temp.txt ADDED
@@ -0,0 +1,24 @@
+ fastapi
+ pydantic
+ uvicorn
+ certifi
+ FastAPI-SQLAlchemy
+ pip
+ PyYAML
+ pandas
+ python-multipart
+ pymongo
+ python-dotenv
+ requests
+ requests-file
+ setuptools
+ SQLAlchemy
+ starlette
+ typer
+ typing_extensions
+ urllib3
+ wasabi
+ #https://huggingface.co/spacy/en_core_web_lg/resolve/main/en_core_web_lg-any-py3-none-any.whl
+ #../lib/presidio_analyzer-4.0.6-py3-none-any.whl
+ ../lib/aicloudlibs-0.1.0-py3-none-any.whl
+ #../lib/en_core_web_lg-any-py3-none-any.whl
src/exception/__init__.py ADDED
File without changes
src/exception/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (201 Bytes).
src/exception/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (195 Bytes).
src/exception/__pycache__/exception.cpython-311.pyc ADDED
Binary file (3.02 kB).
src/exception/__pycache__/exception.cpython-39.pyc ADDED
Binary file (2.2 kB).
src/exception/exception.py ADDED
@@ -0,0 +1,48 @@
+ '''
+ Copyright 2024-2025 Infosys Ltd.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ '''
+
+ """
+ fileName: exception.py
+ description: handles usecase module specific exception
+ """
+ import sys, traceback
+ from constants.local_constants import SPACE_DELIMITER,PLACEHOLDER_TEXT,USECASE_ALREADY_EXISTS,USECASE_NOT_FOUND_ERROR,USECASE_NAME_VALIDATION_ERROR
+ from constants import local_constants as global_constants
+
+ from abc import ABC
+
+
+ class modeldeploymentException(Exception, ABC):
+     """
+     description: Abstract base class of UsecaseException.
+     """
+
+     def __init__(self, detail: str) -> None:
+         self.status_code = global_constants.HTTP_STATUS_BAD_REQUEST
+         super().__init__(detail)
+
+
+ class modeldeploymentNotFoundError(modeldeploymentException):
+     """
+     description: UsecaseNotFoundError thrown by usecase service
+     when the requested usecase details not found for a specific user.
+     """
+     def __init__(self,name):
+         self.status_code = global_constants.HTTP_STATUS_NOT_FOUND
+         self.detail = USECASE_NOT_FOUND_ERROR.replace(PLACEHOLDER_TEXT,name)
+
+ class modeldeploymentNameNotEmptyError(modeldeploymentException):
+     """
+     description: UsecaseNameNotEmptyError thrown by create usecase service
+     when the requested usecase details not having usecase name.
+     """
+     def __init__(self,name):
+         self.status_code = global_constants.HTTP_STATUS_409_CODE
+         self.detail = USECASE_NAME_VALIDATION_ERROR
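A short sketch of how these exception types carry their status code and message:

# Illustration only: raising and inspecting the custom exceptions above ("uc-123" is a made-up id).
from exception.exception import modeldeploymentNotFoundError

try:
    raise modeldeploymentNotFoundError("uc-123")
except modeldeploymentNotFoundError as exc:
    print(exc.status_code)   # 404 (HTTP_STATUS_NOT_FOUND)
    print(exc.detail)        # Usecase id uc-123 Not Found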
src/exestep.txt ADDED
@@ -0,0 +1,10 @@
+ // MIT license https://opensource.org/licenses/MIT
+ // Copyright 2024 Infosys Ltd
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ pyinstaller --onedir --add-data "C:\WORK\GIT\responsible-ai-mm-flask\myenv\Lib\site-packages;." --add-data "C:\WORK\GIT\responsible-ai-mm-flask\models;models" main.py
src/logger.ini ADDED
@@ -0,0 +1,5 @@
+ [logDetails]
+ LOG_LEVEL=ERROR
+ FILE_NAME=responsible-ai-servicelogs
+ VERBOSE=False
+ LOG_DIR=/responsible-ai/logs
src/logs/rai/responsible-ai-servicelogs_20240221_174013.log ADDED
File without changes
src/logs/rai/responsible-ai-servicelogs_20240221_174501.log ADDED
File without changes
src/main.py ADDED
@@ -0,0 +1,58 @@
+ '''
+ Copyright 2024-2025 Infosys Ltd.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ '''
+
+ import os
+ from flask import Flask,request,jsonify
+ from flask_swagger_ui import get_swaggerui_blueprint
+ from routing.router import router
+ from config.logger import CustomLogger,request_id_var
+ from waitress import serve
+ from werkzeug.exceptions import HTTPException,UnsupportedMediaType,BadRequest
+ # from mapper.mapper import *
+
+ SWAGGER_URL = '/rai/v1/raimoderationmodels/docs'
+ API_URL = '/static/swagger.json'
+
+ swaggerui_blueprint = get_swaggerui_blueprint(
+     SWAGGER_URL,
+     API_URL
+ )
+
+ log=CustomLogger()
+ app = Flask(__name__)
+ app.register_blueprint(swaggerui_blueprint)
+ app.register_blueprint(router,url_prefix='/rai/v1/raimoderationmodels')
+
+
+ def handle_http_exception(exc):
+     """Handles HTTP exceptions, returning a JSON response."""
+     response = jsonify({"error": exc.description})
+     response.status_code = exc.code
+     return response
+
+ @app.errorhandler(HTTPException)
+ def handle_all_http_exceptions(exc):
+     """Global exception handler for HTTP errors."""
+     return handle_http_exception(exc)
+
+ def handle_unsupported_mediatype(exc):
+     """Handles unsupported media type exceptions."""
+     return jsonify({"error": "Unsupported media type"}), 415 # 415 Unsupported Media Type
+
+ @app.errorhandler(UnsupportedMediaType)
+ def handle_all_unsupported_mediatype_exceptions(exc):
+     """Global exception handler for unsupported media types."""
+     return handle_unsupported_mediatype(exc)
+
+
+ if __name__ == "__main__":
+     serve(app, host='0.0.0.0', port=7860, threads=int(os.getenv('THREADS',1)),connection_limit=int(os.getenv('CONNECTION_LIMIT',500)), channel_timeout=int(os.getenv('CHANNEL_TIMEOUT',120)))
+     #app.run()
+
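With the blueprint mounted under /rai/v1/raimoderationmodels and waitress listening on port 7860, the endpoints can be exercised with a plain HTTP client. A sketch, assuming the service is running locally with its models loaded:

# Sketch only: calling the toxicity endpoint served by main.py.
import requests

resp = requests.post(
    "http://localhost:7860/rai/v1/raimoderationmodels/detoxifymodel",
    json={"text": "sample text to score"},
    timeout=60,
)
print(resp.status_code, resp.json())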
src/main_test.py ADDED
@@ -0,0 +1,494 @@
1
+ '''
2
+ Copyright 2024-2025 Infosys Ltd.
3
+
4
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
5
+
6
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
7
+
8
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
9
+ '''
10
+
11
+ import pickle
12
+ import torch
13
+ import os
14
+ import time
15
+ import logging
16
+ from flask import Flask, render_template, request, jsonify
17
+ from flask import g
18
+ from datetime import datetime
19
+ import json
20
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
21
+ from sentence_transformers import SentenceTransformer,util
22
+ from detoxify import Detoxify
23
+ from presidio_analyzer import AnalyzerEngine, RecognizerRegistry
24
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
25
+ from dao.AdminDb import Results
26
+ from werkzeug.exceptions import HTTPException,BadRequest,UnprocessableEntity,InternalServerError
27
+ from tqdm.auto import tqdm
28
+ from fastapi.encoders import jsonable_encoder
29
+
30
+
31
+ import numpy as np
32
+
33
+ import traceback
34
+ import uuid
35
+ from waitress import serve
36
+ from mapper.mapper import *
37
+ import contextvars
38
+
39
+ app = Flask(__name__)
40
+ print("before loading model")
41
+ request_id_var = contextvars.ContextVar("request_id_var")
42
+ #pipe = StableDiffusionPipeline.from_pretrained('/model/stablediffusion/fp32/model')
43
+ device = "cuda"
44
+ registry = RecognizerRegistry()
45
+ registry.load_predefined_recognizers()
46
+ analyzer_engine = AnalyzerEngine(registry=registry)
47
+
48
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
49
+ gpu=0 if torch.cuda.is_available() else -1
50
+ check_point = 'toxic_debiased-c7548aa0.ckpt'
51
+ toxicityModel = Detoxify(checkpoint='../models/detoxify/'+ check_point,
52
+ device=device,
53
+ huggingface_config_path='../models/detoxify')
54
+
55
+ PromptModel_dberta = AutoModelForSequenceClassification.from_pretrained("../models/dbertaInjection").to(device)
56
+ Prompttokens_dberta = AutoTokenizer.from_pretrained("../models/dbertaInjection")
57
+
58
+ topictokenizer_Facebook = AutoTokenizer.from_pretrained("../models/facebook")
59
+ topicmodel_Facebook = AutoModelForSequenceClassification.from_pretrained("../models/facebook").to(device)
60
+
61
+ topictokenizer_dberta = AutoTokenizer.from_pretrained("../models/restricted-dberta-large-zeroshot")
62
+ topicmodel_dberta = AutoModelForSequenceClassification.from_pretrained("../models/restricted-dberta-large-zeroshot").to(device)
63
+
64
+ # classifier = pipeline("zero-shot-classification",model="../models/facebook",device=device)
65
+ # classifier2 = pipeline("zero-shot-classification",model="../models/restricted-dberta-large-zeroshot",device=device)
66
+ encoder = SentenceTransformer("../models/multi-qa-mpnet-base-dot-v1").to(device)
67
+ jailbreakModel = encoder
68
+ similarity_model =encoder
69
+ request_id_var.set("Startup")
70
+ log_dict={}
71
+ print("model loaded")
72
+
73
+ @app.errorhandler(HTTPException)
74
+ def handle_exception(e):
75
+ """Return JSON instead of HTML for HTTP errors."""
76
+ # start with the correct headers and status code from the error
77
+ response = e.get_response()
78
+ # replace the body with JSON
79
+ response.data = json.dumps({
80
+ "code": e.code,
81
+ "details": e.description,
82
+ })
83
+ response.content_type = "application/json"
84
+ return response
85
+
86
+ @app.errorhandler(UnprocessableEntity)
87
+ def validation_error_handler(exc):
88
+ """Return JSON instead of HTML for HTTP errors."""
89
+ # start with the correct headers and status code from the error
90
+ response = exc.get_response()
91
+ print(response)
92
+ # replace the body with JSON
93
+ exc_code_desc=exc.description.split("-")
94
+ exc_code=int(exc_code_desc[0])
95
+ exc_desc=exc_code_desc[1]
96
+ response.data = json.dumps({
97
+ "code": exc_code,
98
+ "details": exc_desc,
99
+ })
100
+ response.content_type = "application/json"
101
+ return response
102
+
103
+ @app.errorhandler(InternalServerError)
104
+ def validation_error_handler(exc):
105
+ """Return JSON instead of HTML for HTTP errors."""
106
+ # start with the correct headers and status code from the error
107
+ response = exc.get_response()
108
+ print(response)
109
+ # replace the body with JSON
110
+ response.data = json.dumps({
111
+ "code": 500,
112
+ "details": "Some Error Occurred ,Please try Later",
113
+ })
114
+ response.content_type = "application/json"
115
+ return response
116
+
117
+ @app.route("/rai/v2test/raimoderationmodels/detoxifymodel",methods=[ 'POST'])
118
+ def toxic_model():
119
+ st=time.time()
120
+
121
+ try:
122
+
123
+ id=uuid.uuid4().hex
124
+ payload=request.get_json()
125
+ request_id_var.set(id)
126
+ logging.info("before invoking toxic_model service ")
127
+ log_dict[request_id_var.get()]=[]
128
+ if payload['text'] is None or (payload['text'] is not None and len(payload['text'])==0):
129
+ raise UnprocessableEntity("1021-Input Text should not be empty ")
130
+ response = toxicity_check(payload,id)
131
+
132
+ logging.info("after invoking toxic_model service ")
133
+
134
+ er=log_dict[request_id_var.get()]
135
+ logobj = {"_id":id,"error":er}
136
+
137
+
138
+ if len(er)!=0:
139
+ Results.createlog(logobj)
140
+ del log_dict[id]
141
+ logging.debug("response : " + str(response))
142
+ logging.info("exit toxic_model routing method")
143
+ logging.info(f"Time taken by toxicity {time.time()-st}")
144
+ return jsonable_encoder(response)
145
+ except UnprocessableEntity as cie:
146
+ logging.error(cie.__dict__)
147
+ logging.info("exit toxic_model routing method")
148
+ raise UnprocessableEntity(**cie.__dict__)
149
+ except Exception as cie:
150
+ logging.error(cie.__dict__)
151
+ logging.info("exit toxic_model routing method")
152
+ raise HTTPException()
153
+
154
+ @app.route("/rai/v2test/raimoderationmodels/privacy",methods=[ 'POST'])
155
+ def pii_check():
156
+ st=time.time()
157
+ logging.info("Entered pii_check routing method")
158
+ try:
159
+
160
+ id=uuid.uuid4().hex
161
+ payload=request.get_json()
162
+ request_id_var.set(id)
163
+ logging.info("before invoking create usecase service ")
164
+ log_dict[request_id_var.get()]=[]
165
+ if payload['text'] is None or (payload['text'] is not None and len(payload['text'])==0) or payload['entitiesselected'] is None or (payload['entitiesselected'] is not None and len(payload['entitiesselected'])==0):
166
+ raise UnprocessableEntity("1021-invalid input!")
167
+ response = privacy(id,payload['text'],payload['entitiesselected'])
168
+ logging.info("after invoking create usecase service ")
169
+ er=log_dict[request_id_var.get()]
170
+ logobj = {"_id":id,"error":er}
171
+ if len(er)!=0:
172
+ Results.createlog(logobj)
173
+ del log_dict[id]
174
+ logging.debug("response : " + str(response))
175
+ # logging.debug("response : " + str(response))
176
+ logging.info("exit pii_check routing method")
177
+ logging.info(f"Time taken by privacy {time.time()-st}")
178
+ return jsonable_encoder(response)
179
+ except Exception as cie:
180
+ logging.error(cie.__dict__)
181
+ logging.info("exit pii_check routing method")
182
+ raise HTTPException()
183
+
184
+ @app.route("/rai/v2test/raimoderationmodels/promptinjectionmodel",methods=[ 'POST'])
185
+ def prompt_model():
186
+ st=time.time()
187
+ logging.info("Entered prompt_model routing method")
188
+ try:
189
+
190
+ id=uuid.uuid4().hex
191
+ payload=request.get_json()
192
+ request_id_var.set(id)
193
+ logging.info("before invoking prompt_model service")
194
+ log_dict[request_id_var.get()]=[]
195
+ if payload['text'] is None or (payload['text'] is not None and len(payload['text'])==0):
196
+ raise UnprocessableEntity("1021-Input Text should not be empty ")
197
+ response = promptInjection_check(payload['text'],id)
198
+ logging.info("after invoking prompt_model service ")
199
+ er=log_dict[request_id_var.get()]
200
+ logobj = {"_id":id,"error":er}
201
+ if len(er)!=0:
202
+ Results.createlog(logobj)
203
+ del log_dict[id]
204
+ logging.debug("response : " + str(response))
205
+ # logging.debug("response : " + str(response))
206
+ logging.info("exit prompt_model routing method")
207
+ logging.info(f"Time taken by promptinjection {time.time()-st}")
208
+ return jsonable_encoder(response)
209
+ except Exception as cie:
210
+ logging.error(cie.__dict__)
211
+ logging.info("exit prompt_model routing method")
212
+ raise HTTPException()
213
+
214
+ @app.route("/rai/v2test/raimoderationmodels/restrictedtopicmodel",methods=[ 'POST'])
215
+ def restrictedTopic_model():
216
+ st=time.time()
217
+ logging.info("Entered restrictedTopic_model routing method")
218
+ try:
219
+ id=uuid.uuid4().hex
220
+ payload=request.get_json()
221
+ request_id_var.set(id)
222
+ logging.info("before invoking restrictedTopic_model service ")
223
+ log_dict[request_id_var.get()]=[]
224
+
225
+ label_cond = payload['labels'] is None or (payload['labels'] is not None and len(payload['labels'])==0)
226
+ model_cond=False
227
+ # print("--")
228
+ if("model" in payload):
229
+ model_cond = payload['model'] is None or (payload['model'] is not None and len(payload['model'])==0)
230
+ # print("==")
231
+ if payload['text'] is None or (payload['text'] is not None and len(payload['text'])==0) or label_cond or model_cond:
232
+ raise UnprocessableEntity("1021-invalid input ")
233
+ response = restricttopic_check(payload,id)
234
+ logging.info("after invoking restrictedTopic_model service ")
235
+ er=log_dict[request_id_var.get()]
236
+ logobj = {"_id":id,"error":er}
237
+ if len(er)!=0:
238
+ Results.createlog(logobj)
239
+ del log_dict[id]
240
+ logging.debug("response : " + str(response))
241
+ # logging.debug("response : " + str(response))
242
+ logging.info("exit restrictedTopic_model routing method")
243
+ logging.info(f"Time taken by RestrictedTopic{time.time()-st}")
244
+ # print(type(response))
245
+ # print(type(jsonable_encoder(response)))
246
+ return jsonable_encoder(response)
247
+ except Exception as cie:
248
+
249
+ logging.error(cie.__dict__)
250
+ logging.info("exit restrictedTopic_model routing method")
251
+ raise HTTPException()
252
+
253
+ @app.route("/rai/v2test/raimoderationmodels/multi_q_net_embedding",methods=[ 'POST'])
254
+ def embedding_model():
255
+ st=time.time()
256
+ logging.info("Entered embedding_model routing method")
257
+ try:
258
+
259
+ id=uuid.uuid4().hex
260
+ payload=request.get_json()
261
+ request_id_var.set(id)
262
+ logging.info("before invoking embedding_model service ")
263
+ log_dict[request_id_var.get()]=[]
264
+ if payload['text'] is None or (payload['text'] is not None and len(payload['text'])==0):
265
+ raise UnprocessableEntity("1021-Input Text should not be empty ")
266
+ response = multi_q_net_embedding(id,payload['text'])
267
+ logging.info("after invoking embedding_model service ")
268
+ er=log_dict[request_id_var.get()]
269
+ logobj = {"_id":id,"error":er}
270
+ if len(er)!=0:
271
+ Results.createlog(logobj)
272
+ del log_dict[id]
273
+ logging.debug("response : " + str(response))
274
+ # logging.debug("response : " + str(response))
275
+ logging.info("exit embedding_model routing method")
276
+ logging.info(f"Time taken by Jailbreak {time.time()-st}")
277
+ return jsonable_encoder(response)
278
+ except Exception as cie:
279
+ logging.error(cie.__dict__)
280
+ logging.info("exit embedding_model routing method")
281
+ raise HTTPException()
282
+
283
+ @app.route("/rai/v2test/raimoderationmodels/multi-qa-mpnet-model_similarity",methods=[ 'POST'])
284
+ def similarity_model():
285
+ st=time.time()
286
+ logging.info("Entered similarity_model routing method")
287
+ try:
288
+
289
+ id=uuid.uuid4().hex
290
+ request_id_var.set(id)
291
+ logging.info("before invoking similarity_model service ")
292
+ payload=request.get_json()
293
+ log_dict[request_id_var.get()]=[]
294
+ text1_cond = payload['text1'] is None or (payload['text1'] is not None and len(payload['text1'])==0)
295
+ text2_cond = payload['text2'] is None or (payload['text2'] is not None and len(payload['text2'])==0)
296
+ emb1_cond = payload['emb1'] is None or (payload['emb1'] is not None and len(payload['emb1'])==0)
297
+ emb2_cond = payload['emb2'] is None or (payload['emb2'] is not None and len(payload['emb2'])==0)
298
+ if text1_cond or text2_cond or emb1_cond or emb2_cond:
299
+ raise UnprocessableEntity("1021-Input Text should not be empty ")
300
+ response = multi_q_net_similarity(id,payload['text1'],payload['text2'],payload['emb1'],payload['emb2'])
301
+ logging.info("after invoking similarity_model service ")
302
+ er=log_dict[request_id_var.get()]
303
+ logobj = {"_id":id,"error":er}
304
+ if len(er)!=0:
305
+ Results.createlog(logobj)
306
+ del log_dict[id]
307
+ logging.debug("response : " + str(response))
308
+ # logging.debug("response : " + str(response))
309
+ logging.info("exit similarity_model routing method")
310
+ logging.info(f"Time taken by similary{time.time()-st}")
311
+ return jsonable_encoder(response)
312
+ except Exception as cie:
313
+ logging.error(cie.__dict__)
314
+ logging.info("exit similarity_model routing method")
315
+ raise HTTPException()
316
+
317
+
318
+ def privacy(id,text,PIIenities_selected=None):
319
+ try:
320
+ analyzer_results = analyzer_engine.analyze(text=text, language="en",entities=PIIenities_selected)
321
+ entityList= []
322
+ anyz_res = jsonable_encoder(analyzer_results)
323
+ for i in anyz_res:
324
+ entityList.append(i['entity_type'])
325
+ return anyz_res,jsonable_encoder(entityList)
326
+ except Exception as e:
327
+
328
+ logging.error("Error occured in privacy")
329
+ logging.error(f"Exception: {e}")
330
+ log_dict[request_id_var.get()].append({"Line number":str(traceback.extract_tb(e.__traceback__)[0].lineno),"Error":str(e),
331
+ "Error Module":"Failed at privacy call"})
332
+ raise InternalServerError()
333
+
334
+
335
+ def multi_q_net_similarity(id,text1=None,text2=None,emb1=None,emb2=None):
336
+ try:
337
+ if text1:
338
+ with torch.no_grad():
339
+ emb1 = jailbreakModel.encode(text1, convert_to_tensor=True,device=device)
340
+ if text2:
341
+ with torch.no_grad():
342
+ emb2 = jailbreakModel.encode(text2, convert_to_tensor=True,device=device)
343
+
344
+ emb = util.pytorch_cos_sim(emb1, emb2).to("cpu").numpy().tolist()
345
+ del emb1
346
+ del emb2
347
+ #torch.cuda.empty_cache()
348
+ return emb
349
+ except Exception as e:
350
+
351
+ logging.error("Error occured in multi_q_net_similarity")
352
+ logging.error(f"Exception: {e}")
353
+ log_dict[request_id_var.get()].append({"Line number":str(traceback.extract_tb(e.__traceback__)[0].lineno),"Error":str(e),
354
+ "Error Module":"Failed at multi_q_net_similarity call"})
355
+ raise InternalServerError()
356
+
357
+
358
+ def multi_q_net_embedding(id,lst):
359
+ try:
360
+ res = []
361
+ for text in lst:
362
+ with torch.no_grad():
363
+ text_embedding = jailbreakModel.encode(text, convert_to_tensor=True,device=device)
364
+ res.append(text_embedding.to("cpu").numpy().tolist())
365
+
366
+ del text_embedding
367
+ #torch.cuda.empty_cache()
368
+ return res
369
+ # return text_embedding.numpy().tolist()
370
+ except Exception as e:
371
+
372
+ logging.error("Error occured in multi_q_net text embedding")
373
+ logging.error(f"Exception: {e}")
374
+ log_dict[request_id_var.get()].append({"Line number":str(traceback.extract_tb(e.__traceback__)[0].lineno),"Error":str(e),
375
+ "Error Module":"Failed at multi_q_net text embedding call"})
376
+ raise InternalServerError()
377
+
378
+
379
+ def restricttopic_check(payload,id):
380
+ try:
381
+ # topicmodel = topicmodel_Facebook
382
+ # topictokenizer = topictokenizer_Facebook
383
+
384
+ # nlp = pipeline('zero-shot-classification', model=classifier, tokenizer=topictokenizer)
385
+
386
+ text=payload['text']
387
+ labels=payload['labels']
388
+
389
+ model =payload['model'] if hasattr(payload, 'model') else "facebook"
390
+ if model==None:
391
+ model="dberta"
392
+
393
+ if model=="facebook":
394
+ # nlp = classifier
395
+ nlp = pipeline('zero-shot-classification', model=topicmodel_Facebook, tokenizer=topictokenizer_Facebook, device=gpu)
396
+ elif model=="dberta":
397
+ # nlp = classifier2
398
+ nlp = pipeline('zero-shot-classification', model=topicmodel_dberta, tokenizer=topictokenizer_dberta,device=gpu)
399
+ with torch.no_grad():
400
+ output=nlp(text, labels,multi_label=True)
401
+ for i in range(len(output["scores"])):
402
+ output["scores"][i] = round(output["scores"][i],4)
403
+
404
+ del nlp
405
+ #torch.cuda.empty_cache()
406
+ return output
407
+
408
+ except Exception as e:
409
+
410
+ logging.error("Error occured in restricttopic_check")
411
+ logging.error(f"Exception: {e}")
412
+ log_dict[request_id_var.get()].append({"Line number":str(traceback.extract_tb(e.__traceback__)[0].lineno),"Error":str(e),
413
+ "Error Module":"Failed at restricttopic_check call"})
414
+ raise InternalServerError()
415
+
416
+ def toxicity_check(payload,id) :
417
+ try:
418
+ text = payload['text']
419
+ with torch.no_grad():
420
+ output = toxicityModel.predict(text)
421
+ List_profanity_score = []
422
+ obj_profanityScore_toxic = profanityScore(metricName='toxicity',
423
+ metricScore=output['toxicity'])
424
+ obj_profanityScore_severe_toxic = profanityScore(metricName='severe_toxicity',
425
+ metricScore=output['severe_toxicity'])
426
+ obj_profanityScore_obscene = profanityScore(metricName='obscene',
427
+ metricScore=output['obscene'])
428
+ obj_profanityScore_threat = profanityScore(metricName='threat',
429
+ metricScore=output['threat'])
430
+ obj_profanityScore_insult = profanityScore(metricName='insult',
431
+ metricScore=output['insult'])
432
+ obj_profanityScore_identity_attack = profanityScore(metricName='identity_attack',
433
+ metricScore=output['identity_attack'])
434
+ obj_profanityScore_sexual_explicit = profanityScore(metricName='sexual_explicit',
435
+ metricScore=output['sexual_explicit'])
436
+
437
+ List_profanity_score.append(obj_profanityScore_toxic)
438
+ List_profanity_score.append(obj_profanityScore_severe_toxic)
439
+ List_profanity_score.append(obj_profanityScore_obscene)
440
+ List_profanity_score.append(obj_profanityScore_threat)
441
+ List_profanity_score.append(obj_profanityScore_insult)
442
+ List_profanity_score.append(obj_profanityScore_identity_attack)
443
+ List_profanity_score.append(obj_profanityScore_sexual_explicit)
444
+
445
+ objProfanityAnalyzeResponse = {}
446
+ objProfanityAnalyzeResponse['toxicScore'] = List_profanity_score
447
+
448
+ #torch.cuda.empty_cache()
449
+ return objProfanityAnalyzeResponse
450
+
451
+ except Exception as e:
452
+
453
+ logging.error("Error occured in toxicity_check")
454
+ logging.error(f"Exception: {e}")
455
+ log_dict[request_id_var.get()].append({"Line number":str(traceback.extract_tb(e.__traceback__)[0].lineno),"Error":str(e),
456
+ "Error Module":"Failed at toxicity_check call"})
457
+ raise InternalServerError()
458
+
459
+
460
+ def promptInjection_check(text,id):
461
+ try:
462
+
463
+ Prompttokens = Prompttokens_dberta
464
+ PromptModel = PromptModel_dberta
465
+
466
+ tokens = Prompttokens.encode_plus(text, truncation=True, padding=True, return_tensors="pt").to(device)
467
+
468
+ with torch.no_grad():
469
+ outputs = PromptModel(**tokens)
470
+
471
+ predicted_label = outputs.logits.argmax().item()
472
+ label_names = PromptModel.config.id2label
473
+ predicted_label_name = label_names[predicted_label]
474
+ predicted_probabilities = outputs.logits.softmax(dim=1)[0, predicted_label].item()
475
+
476
+ del tokens
477
+ #torch.cuda.empty_cache()
478
+ # #torch.cuda.empty_cache()
479
+ return predicted_label_name,predicted_probabilities
480
+ except Exception as e:
481
+
482
+ logging.error("Error occured in promptInjection_check")
483
+ logging.error(f"Exception: {e}")
484
+ log_dict[request_id_var.get()].append({"Line number":str(traceback.extract_tb(e.__traceback__)[0].lineno),"Error":str(e),
485
+ "Error Module":"Failed at promptInjection_check call"})
486
+ raise InternalServerError()
487
+
488
+ @app.route("/")
489
+ def hello_world():
490
+ return "<h1>Hello, world!</h1>"
491
+
492
+ if __name__ == "__main__":
493
+ serve(app, host='0.0.0.0', port=8000, threads=int(os.getenv('THREADS',1)),connection_limit=int(os.getenv('CONNECTION_LIMIT',500)), channel_timeout=int(os.getenv('CHANNEL_TIMEOUT',120)))
494
+ #app.run()
src/mapper/__init__.py ADDED
@@ -0,0 +1 @@
+
src/mapper/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (192 Bytes).
src/mapper/__pycache__/mapper.cpython-39.pyc ADDED
Binary file (1.17 kB).
src/mapper/mapper.py ADDED
@@ -0,0 +1,31 @@
+ '''
+ Copyright 2024-2025 Infosys Ltd.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ '''
+
+ from pydantic import BaseModel, Field,Extra,ValidationError
+ from typing import Optional, Union, List
+ from enum import Enum
+
+
+ class profanityScore(BaseModel):
+     metricName: str = Field(example="toxicity")
+     metricScore: float = Field(example=0.78326)
+
+     class Config:
+         orm_mode = True
+
+
+
+
+
+
+
+
+
+
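profanityScore is the per-metric item returned by the toxicity endpoint; constructing one and serialising it looks roughly like this:

# Illustration only: building and encoding the response model above.
from fastapi.encoders import jsonable_encoder
from mapper.mapper import profanityScore

score = profanityScore(metricName="toxicity", metricScore=0.78326)
print(jsonable_encoder(score))   # {'metricName': 'toxicity', 'metricScore': 0.78326}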
src/project/__init__.py ADDED
File without changes
src/routing/__init__.py ADDED
File without changes
src/routing/router.py ADDED
@@ -0,0 +1,307 @@
1
+ '''
2
+ Copyright 2024-2025 Infosys Ltd.
3
+
4
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
5
+
6
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
7
+
8
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
9
+ '''
10
+
11
+ from flask import Blueprint
12
+ import time
13
+ # import logging
14
+ from flask import request
15
+ # from dao.AdminDb import Results
16
+ from werkzeug.exceptions import HTTPException,UnprocessableEntity
17
+ from tqdm.auto import tqdm
18
+ # from fastapi.encoders import jsonable_encoder
19
+
20
+ from service.service import *
21
+ from config.logger import CustomLogger ,request_id_var
22
+
23
+ import uuid
24
+ from mapper.mapper import *
25
+ import psutil
26
+
27
+ request_id_var.set('Startup')
28
+ router = Blueprint('router', __name__,)
29
+ log=CustomLogger()
30
+
31
+ @router.route("/detoxifymodel",methods=[ 'POST'])
32
+ def toxic_model():
33
+ st=time.time()
34
+ stmem = psutil.Process().memory_info().rss
35
+ id=uuid.uuid4().hex
36
+ request_id_var.set(id)
37
+ try:
38
+
39
+ # id=uuid.uuid4().hex
40
+ payload=request.get_json()
41
+ # request_id_var.set(id)
42
+ log.info("before invoking toxic_model service ")
43
+ log_dict[request_id_var.get()]=[]
44
+ if payload['text'] is None or (payload['text'] is not None and len(payload['text'])==0):
45
+ raise UnprocessableEntity("1021-Input Text should not be empty ")
46
+ response = toxicity_check(payload,id)
47
+
48
+ log.info("after invoking toxic_model service ")
49
+
50
+ er=log_dict[request_id_var.get()]
51
+ logobj = {"_id":id,"error":er}
52
+
53
+
54
+ if len(er)!=0:
55
+ log.debug(str(logobj))
56
+ # print("this is --> ",str(logobj) )
57
+ del log_dict[id]
58
+ log.debug("response : " + str(response))
59
+ log.info("exit toxic_model routing method")
60
+ log.info(f"Time taken by toxicity {time.time()-st}")
61
+ return jsonable_encoder(response)
62
+ except UnprocessableEntity as cie:
63
+ log.error(str(cie.__dict__))
64
+ log.info("exit toxic_model routing method")
65
+ raise UnprocessableEntity(**cie.__dict__)
66
+ except Exception as cie:
67
+ log.error(str(cie.__dict__))
68
+ log.info("exit toxic_model routing method")
69
+ raise HTTPException()
70
+
71
+ @router.route("/privacy",methods=[ 'POST'])
72
+ def pii_check():
73
+ st=time.time()
74
+ id=uuid.uuid4().hex
75
+ request_id_var.set(id)
76
+ log.info("Entered pii_check routing method")
77
+ try:
78
+
79
+ # id=uuid.uuid4().hex
80
+ payload=request.get_json()
81
+ # request_id_var.set(id)
82
+ log.info("before invoking create usecase service ")
83
+ log_dict[request_id_var.get()]=[]
84
+ if payload['text'] is None or (payload['text'] is not None and len(payload['text'])==0):
85
+ raise UnprocessableEntity("1021-invalid input!")
86
+ response = privacy(id,payload['text'])
87
+ log.info("after invoking create usecase service ")
88
+ er=log_dict[request_id_var.get()]
89
+ logobj = {"_id":id,"error":er}
90
+ if len(er)!=0:
91
+ log.debug(str(logobj))
92
+ del log_dict[id]
93
+ log.debug("response : " + str(response))
94
+ # log.debug("response : " + str(response))
95
+ log.info("exit pii_check routing method")
96
+ log.info(f"Time taken by privacy {time.time()-st}")
97
+ return jsonable_encoder(response)
98
+ except UnprocessableEntity as cie:
99
+ log.error(str(cie.__dict__))
100
+ log.info("exit pii_check routing method")
101
+ raise UnprocessableEntity(**cie.__dict__)
102
+ except Exception as cie:
103
+ log.error(str(cie.__dict__))
104
+ log.info("exit pii_check routing method")
105
+ raise HTTPException()
106
+
107
+ @router.route("/promptinjectionmodel",methods=[ 'POST'])
108
+ def prompt_model():
109
+ st=time.time()
110
+ id=uuid.uuid4().hex
111
+ request_id_var.set(id)
112
+ log.info("Entered prompt_model routing method")
113
+ try:
114
+
115
+ # id=uuid.uuid4().hex
116
+ payload=request.get_json()
117
+ # request_id_var.set(id)
118
+ log.info("before invoking prompt_model service")
119
+ log_dict[request_id_var.get()]=[]
120
+ if payload['text'] is None or (payload['text'] is not None and len(payload['text'])==0):
121
+ raise UnprocessableEntity("1021-Input Text should not be empty ")
122
+ response = promptInjection_check(payload['text'],id)
123
+ log.info("after invoking prompt_model service ")
124
+ er=log_dict[request_id_var.get()]
125
+ logobj = {"_id":id,"error":er}
126
+ if len(er)!=0:
127
+ log.debug(str(logobj))
128
+ del log_dict[id]
129
+ log.debug("response : " + str(response))
130
+ # log.debug("response : " + str(response))
131
+ log.info("exit prompt_model routing method")
132
+ log.info(f"Time taken by promptinjection {time.time()-st}")
133
+ return jsonable_encoder(response)
134
+ except UnprocessableEntity as cie:
135
+ log.error(str(cie.__dict__))
136
+ log.info("exit prompt_model routing method")
137
+ raise UnprocessableEntity(**cie.__dict__)
138
+ except Exception as cie:
139
+ log.error(str(cie.__dict__))
140
+ log.info("exit prompt_model routing method")
141
+ raise HTTPException()
142
+
143
+ @router.route("/restrictedtopicmodel",methods=[ 'POST'])
144
+ def restrictedTopic_model():
145
+ st=time.time()
146
+ id=uuid.uuid4().hex
147
+ request_id_var.set(id)
148
+ log.info("Entered restrictedTopic_model routing method")
149
+ try:
150
+ # id=uuid.uuid4().hex
151
+ payload=request.get_json()
152
+ # request_id_var.set(id)
153
+ log.info("before invoking restrictedTopic_model service ")
154
+ log_dict[request_id_var.get()]=[]
155
+
156
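+ # Both 'text' and 'labels' must be present and non-empty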
+ label_cond = payload['labels'] is None or (payload['labels'] is not None and len(payload['labels'])==0)
157
+ #model_cond=False
158
+ # print("--")
159
+ # if("model" in payload):
160
+ # model_cond = payload['model'] is None or (payload['model'] is not None and len(payload['model'])==0)
161
+ # print("==")
162
+ if payload['text'] is None or (payload['text'] is not None and len(payload['text'])==0) or label_cond:
163
+ raise UnprocessableEntity("1021-invalid input ")
164
+ response = restricttopic_check(payload,id)
165
+ log.info("after invoking restrictedTopic_model service ")
166
+ er=log_dict[request_id_var.get()]
167
+ logobj = {"_id":id,"error":er}
168
+ if len(er)!=0:
169
+ log.debug(str(logobj))
170
+ del log_dict[id]
171
+ log.debug("response : " + str(response))
172
+ # log.debug("response : " + str(response))
173
+ log.info("exit restrictedTopic_model routing method")
174
+ log.info(f"Time taken by RestrictedTopic{time.time()-st}")
175
+ # print(type(response))
176
+ # print(type(jsonable_encoder(response)))
177
+ return jsonable_encoder(response)
178
+ except UnprocessableEntity as cie:
179
+ log.error(str(cie.__dict__))
180
+ log.info("exit restrictedTopic_model routing method")
181
+ raise UnprocessableEntity(**cie.__dict__)
182
+ except Exception as cie:
183
+ log.error(str(cie))
184
+ log.error(str(cie.__dict__))
185
+ log.info("exit restrictedTopic_model routing method")
186
+ raise HTTPException()
187
+
188
+ @router.route("/multi_q_net_embedding",methods=[ 'POST'])
189
+ def embedding_model():
190
+ st=time.time()
191
+ id=uuid.uuid4().hex
192
+ request_id_var.set(id)
193
+ log.info("Entered embedding_model routing method")
194
+
195
+ try:
196
+
197
+ # id=uuid.uuid4().hex
198
+ payload=request.get_json()
199
+ # request_id_var.set(id)
200
+
201
+ log.info("before invoking embedding_model service ")
202
+ log_dict[request_id_var.get()]=[]
203
+ if payload['text'] is None or (payload['text'] is not None and len(payload['text'])==0):
204
+ raise UnprocessableEntity("1021-Input Text should not be empty ")
205
+ response = multi_q_net_embedding(id,payload['text'])
206
+ log.info("after invoking embedding_model service ")
207
+ er=log_dict[request_id_var.get()]
208
+ logobj = {"_id":id,"error":er}
209
+ if len(er)!=0:
210
+ log.debug(str(logobj))
211
+ del log_dict[id]
212
+ # log.debug("response : " + str(response))
213
+ # log.debug("response : " + str(response))
214
+ log.info("exit embedding_model routing method")
215
+ log.info(f"Time taken by Jailbreak {time.time()-st}")
216
+ return jsonable_encoder(response)
217
+ except UnprocessableEntity as cie:
218
+ log.error(str(cie.__dict__))
219
+ log.info("exit embedding_model routing method")
220
+ raise UnprocessableEntity(**cie.__dict__)
221
+ except Exception as cie:
222
+ log.error(str(cie.__dict__))
223
+ log.info("exit embedding_model routing method")
224
+ raise HTTPException()
225
+
226
+ @router.route("/multi-qa-mpnet-model_similarity",methods=[ 'POST'])
227
+ def similarity_model():
228
+ st=time.time()
229
+ id=uuid.uuid4().hex
230
+ request_id_var.set(id)
231
+ log.info("Entered similarity_model routing method")
232
+ try:
233
+ log.info("before invoking similarity_model service ")
234
+ payload=request.get_json()
235
+ log_dict[request_id_var.get()]=[]
236
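+ # emb1/emb2 are optional precomputed embeddings; when they are absent, the service encodes text1/text2 itself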
+ emb1_cond=None
237
+ emb2_cond=None
238
+
239
+ text1_cond = payload['text1'] is None or (payload['text1'] is not None and len(payload['text1'])==0)
240
+ text2_cond = payload['text2'] is None or (payload['text2'] is not None and len(payload['text2'])==0)
241
+ if('emb1' in payload):
242
+ emb1_cond = payload['emb1'] is None or (payload['emb1'] is not None and len(payload['emb1'])==0)
243
+ else:
244
+ payload['emb1']=None
245
+ if('emb2' in payload):
246
+ emb2_cond = payload['emb2'] is None or (payload['emb2'] is not None and len(payload['emb2'])==0)
247
+ else:
248
+ payload['emb2']=None
249
+
250
+ if text1_cond or text2_cond or emb1_cond or emb2_cond:
251
+ raise UnprocessableEntity("1021-Input Text should not be empty ")
252
+ response = multi_q_net_similarity(id,payload['text1'],payload['text2'],payload['emb1'],payload['emb2'])
253
+ log.info("after invoking similarity_model service ")
254
+ er=log_dict[request_id_var.get()]
255
+ logobj = {"_id":id,"error":er}
256
+ if len(er)!=0:
257
+ log.debug(str(logobj))
258
+ del log_dict[id]
259
+ log.debug("response : " + str(response))
260
+ # log.debug("response : " + str(response))
261
+ log.info("exit similarity_model routing method")
262
+ log.info(f"Time taken by similary{time.time()-st}")
263
+ return jsonable_encoder(response)
264
+ except UnprocessableEntity as cie:
265
+ log.error(str(cie.__dict__))
266
+ log.info("exit similarity_model routing method")
267
+ raise UnprocessableEntity(**cie.__dict__)
268
+ except Exception as cie:
269
+ log.error(str(cie.__dict__))
270
+ log.info("exit similarity_model routing method")
271
+ raise HTTPException()
272
+
273
+ # @router.route("/callmodel",methods=[ 'POST'])
274
+ # def ml():
275
+ # st=time.time()
276
+ # id=uuid.uuid4().hex
277
+ # request_id_var.set(id)
278
+ # log.info("Entered embedding_model routing method")
279
+
280
+ # try:
281
+
282
+ # # id=uuid.uuid4().hex
283
+ # payload=request.get_json()
284
+ # # request_id_var.set(id)
285
+
286
+ # log.info("before invoking embedding_model service ")
287
+ # log_dict[request_id_var.get()]=[]
288
+ # # if payload['text'] is None or (payload['text'] is not None and len(payload['text'])==0):
289
+ # # raise UnprocessableEntity("1021-Input Text should not be empty ")
290
+ # response = checkall(id,payload)
291
+ # log.info("after invoking embedding_model service ")
292
+ # er=log_dict[request_id_var.get()]
293
+ # logobj = {"_id":id,"error":er}
294
+ # if len(er)!=0:
295
+ # log.debug(str(logobj))
296
+ # del log_dict[id]
297
+ # # log.debug("response : " + str(response))
298
+ # # log.debug("response : " + str(response))
299
+ # log.info("exit embedding_model routing method")
300
+ # log.info(f"Time taken by Jailbreak {time.time()-st}")
301
+ # return jsonable_encoder(response)
302
+ # except Exception as cie:
303
+ # log.error(cie.__dict__)
304
+ # log.info("exit embedding_model routing method")
305
+ # raise HTTPException()
306
+
307
+
src/routing/safety_router.py ADDED
@@ -0,0 +1,53 @@
1
+ '''
2
+ Copyright 2024-2025 Infosys Ltd.
3
+
4
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
5
+
6
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
7
+
8
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
9
+ '''
10
+
11
+ from flask import Blueprint
12
+ import time
13
+ from flask import request
14
+ from werkzeug.exceptions import HTTPException
15
+ from service.safety_service import ImageGen
16
+ import uuid
17
+ from exception.exception import modeldeploymentException
18
+ from config.logger import CustomLogger, request_id_var
19
+ from io import BytesIO
20
+ import base64
21
+
22
+
23
+ img_router = Blueprint('img_router', __name__,)
24
+ log=CustomLogger()
25
+
26
+ request_id_var.set('startup')
27
+ @img_router.post("/ImageGenerate")
28
+ def img():
29
+ prompt = request.form.get("prompt")
30
+ st=time.time()
31
+ id=uuid.uuid4().hex
32
+ request_id_var.set(id)
33
+ log.info("Entered create usecase routing method")
34
+ try:
35
+ # log.info("before invoking create usecase service")
36
+
37
+ response = ImageGen.generate(prompt)
38
+ # log.info("after invoking create usecase service ")
39
+
40
+ # log.debug("response : " + str(response))
41
+ log.info("exit create usecase routing method")
42
+ log.info(f"Time taken by toxicity {time.time()-st}")
43
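+ # Serialize the generated PIL image to PNG bytes and return it base64-encoded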
+ imageByte=BytesIO()
44
+ response.save(imageByte, format="png")
45
+ imageByte=imageByte.getvalue()
46
+
47
+ return base64.b64encode(imageByte).decode('utf-8')
48
+ except modeldeploymentException as cie:
49
+ log.error(cie.__dict__)
50
+ log.info("exit create usecase routing method")
51
+ raise HTTPException(**cie.__dict__)
52
+
53
+
src/service/__init__.py ADDED
File without changes
src/service/safety_service.py ADDED
@@ -0,0 +1,35 @@
1
+ '''
2
+ Copyright 2024-2025 Infosys Ltd.
3
+
4
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
5
+
6
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
7
+
8
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
9
+ '''
10
+
11
+ from diffusers import StableDiffusionPipeline
12
+ import torch
13
+
14
+
15
+ model_name = "../models/stable_diffusion_v_1_5"
16
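+ # Load fp16 weights with 50 inference steps on GPU; fall back to fp32 with 2 steps on CPU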
+ if(torch.cuda.is_available()):
17
+ pipe = StableDiffusionPipeline.from_pretrained(model_name,torch_dtype=torch.float16,safety_checker=None)
18
+ inference=50
19
+ device = torch.device("cuda")
20
+ pipe=pipe.to(device)
21
+
22
+ else:
23
+ pipe = StableDiffusionPipeline.from_pretrained(model_name,safety_checker=None)
24
+ inference=2
25
+
26
+ class ImageGen:
27
+ def generate(prompt):
28
+ try:
29
+ image = pipe(prompt,num_inference_steps=inference).images[0]
30
+ # image.show()
31
+ return image
32
+ except Exception as e:
33
+
34
+ raise Exception("Error in generating image")
35
+
src/service/service.py ADDED
@@ -0,0 +1,364 @@
1
+ import os
2
+ '''
3
+ Copyright 2024-2025 Infosys Ltd.
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6
+
7
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8
+
9
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10
+ '''
11
+ import multiprocessing
12
+ import threading
13
+
14
+ import math
15
+ import torch
16
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
17
+ from sentence_transformers import SentenceTransformer,util
18
+ from detoxify import Detoxify
19
+ #from presidio_analyzer import AnalyzerEngine, RecognizerRegistry
20
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
21
+ from werkzeug.exceptions import InternalServerError
22
+ from fastapi.encoders import jsonable_encoder
23
+ import traceback
24
+ from mapper.mapper import *
25
+ import time
26
+ import contextvars
27
+ from config.logger import CustomLogger,request_id_var
28
+ from privacy.privacy import Privacy as ps
29
+
30
+ log = CustomLogger()
31
+
32
+ import sys
33
+ import os
34
+
35
+ try:
36
+ if getattr(sys, 'frozen', False):
37
+ application_path = sys._MEIPASS
38
+ else:
39
+ application_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
40
+
41
+ log=CustomLogger()
42
+ log.info("before loading model")
43
+ request_id_var = contextvars.ContextVar("request_id_var")
44
+ #pipe = StableDiffusionPipeline.from_pretrained('/model/stablediffusion/fp32/model')
45
+ device = "cuda"
46
+ # registry = RecognizerRegistry()
47
+ # registry.load_predefined_recognizers()
48
+ # analyzer_engine = AnalyzerEngine(registry=registry)
49
+
50
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
51
+ print("device",device)
52
+ gpu=0 if torch.cuda.is_available() else -1
53
+ # BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
54
+ # # for i in os.walk(BASE_DIR)
55
+ # print(BASE_DIR)
56
+ # print(os.listdir(BASE_DIR))
57
+ # print(os.listdir())
58
+ check_point = 'toxic_debiased-c7548aa0.ckpt'
59
+ toxicityModel = Detoxify(checkpoint=os.path.join(application_path, 'models/detoxify/'+ check_point),
60
+ device=device,
61
+ huggingface_config_path=os.path.join(application_path, 'models/detoxify'))
62
+ tokenizer = AutoTokenizer.from_pretrained(os.path.join(application_path, "models/detoxify"))
63
+
64
+
65
+ PromptModel_dberta = AutoModelForSequenceClassification.from_pretrained(os.path.join(application_path, "models/dbertaInjection")).to(device)
66
+ Prompttokens_dberta = AutoTokenizer.from_pretrained(os.path.join(application_path, "models/dbertaInjection"))
67
+ promtModel = pipeline("text-classification", model=PromptModel_dberta, tokenizer=Prompttokens_dberta, device=device)
68
+
69
+ #topictokenizer_Facebook = AutoTokenizer.from_pretrained("../models/facebook")
70
+ #topicmodel_Facebook = AutoModelForSequenceClassification.from_pretrained("../models/facebook").to(device)
71
+
72
+ topictokenizer_dberta = AutoTokenizer.from_pretrained(os.path.join(application_path,"models/restricted-dberta-base-zeroshot-v2"))
73
+ topicmodel_dberta = AutoModelForSequenceClassification.from_pretrained(os.path.join(application_path,"models/restricted-dberta-base-zeroshot-v2")).to(device)
74
+ nlp = pipeline('zero-shot-classification', model=topicmodel_dberta, tokenizer=topictokenizer_dberta,device=gpu)
75
+ # classifier = pipeline("zero-shot-classification",model="../../models/facebook",device=device)
76
+ # classifier2 = pipeline("zero-shot-classification",model="../../models/restricted-dberta-large-zeroshot",device=device)
77
+ encoder = SentenceTransformer(os.path.join(application_path, "models/multi-qa-mpnet-base-dot-v1")).to(device)
78
+
79
+ jailbreakModel = encoder
80
+ similarity_model =encoder
81
+ request_id_var.set("Startup")
82
+ log_dict={}
83
+ log.info("model loaded")
84
+
85
+ except Exception as e:
86
+ log.error(f"Exception: {e}")
87
+ log.error(f"Exception: {str(traceback.extract_tb(e.__traceback__)[0].lineno),e}")
88
+
89
+ def privacy(id,text):
90
+ log.info("inside privacy")
91
+
92
+ try:
93
+ st = time.time()
94
+ res=ps.textAnalyze({"inputText":text,
95
+ "account": None,
96
+ "portfolio":None,
97
+ "exclusionList": None,
98
+ "piiEntitiesToBeRedacted":None,
99
+ "nlp":None,
100
+ "fakeData": "false"})
101
+ et = time.time()
102
+ rt = et-st
103
+ #print("result start",res.PIIEntities,"result end", "time",rt)
104
+
105
+ # output['PIIresult'] = {"PIIresult":res.PIIEntities,"modelcalltime":round(rt,3)}
106
+ return {"PIIresult":res.PIIEntities,"modelcalltime":round(rt,3)}
107
+ except Exception as e:
108
+
109
+ log.error("Error occured in privacy")
110
+ log.error(f"Exception: {str(traceback.extract_tb(e.__traceback__)[0].lineno),e}")
111
+ log_dict[request_id_var.get()].append({"Line number":str(traceback.extract_tb(e.__traceback__)[0].lineno),"Error":str(e),
112
+ "Error Module":"Failed at privacy call"})
113
+ raise InternalServerError()
114
+
115
+ def multi_q_net_similarity(id,text1=None,text2=None,emb1=None,emb2=None):
116
+
117
+ try:
118
+ st = time.time()
119
+ if text1:
120
+ with torch.no_grad():
121
+ emb1 = jailbreakModel.encode(text1, convert_to_tensor=True,device=device)
122
+ if text2:
123
+ with torch.no_grad():
124
+ emb2 = jailbreakModel.encode(text2, convert_to_tensor=True,device=device)
125
+
126
+ emb = util.pytorch_cos_sim(emb1, emb2).to("cpu").numpy().tolist()
127
+ del emb1
128
+ del emb2
129
+ et = time.time()
130
+ rt =et-st
131
+ return emb,{'time_taken': str(round(rt,3))+"s"}
132
+ except Exception as e:
133
+
134
+ log.error("Error occured in multi_q_net_similarity")
135
+ log.error(f"Exception: {str(traceback.extract_tb(e.__traceback__)[0].lineno),e}")
136
+ log_dict[request_id_var.get()].append({"Line number":str(traceback.extract_tb(e.__traceback__)[0].lineno),"Error":str(e),
137
+ "Error Module":"Failed at multi_q_net_similarity call"})
138
+ raise InternalServerError()
139
+
140
+ def multi_q_net_embedding(id,lst):
141
+ log.info("inside multi_q_net_embedding")
142
+
143
+ try:
144
+ st = time.time()
145
+ # print("start time JB===========",lst,st)
146
+
147
+ res = []
148
+ for text in lst:
149
+ with torch.no_grad():
150
+ text_embedding = jailbreakModel.encode(text, convert_to_tensor=True,device=device)
151
+ res.append(text_embedding.to("cpu").numpy().tolist())
152
+
153
+ del text_embedding
154
+ et = time.time()
155
+ rt = et-st
156
+ # output['multi_q_net_embedding'] =(res,{'time_taken': str(round(rt,3))+"s"})
157
+ return res,{'time_taken': str(round(rt,3))+"s"}
158
+ # return output
159
+ # return text_embedding.numpy().tolist()
160
+ except Exception as e:
161
+
162
+ log.error("Error occured in multi_q_net text embedding")
163
+ log.error(f"Exception: {str(traceback.extract_tb(e.__traceback__)[0].lineno),e}")
164
+ log_dict[request_id_var.get()].append({"Line number":str(traceback.extract_tb(e.__traceback__)[0].lineno),"Error":str(e),
165
+ "Error Module":"Failed at multi_q_net text embedding call"})
166
+ raise InternalServerError()
167
+
168
+ def restricttopic_check(payload,id):
169
+ log.info("inside restricttopic_check")
170
+
171
+ try:
172
+ st = time.time()
173
+ # topicmodel = topicmodel_Facebook
174
+ # topictokenizer = topictokenizer_Facebook
175
+
176
+ # nlp = pipeline('zero-shot-classification', model=classifier, tokenizer=topictokenizer)
177
+
178
+ text=payload['text']
179
+ labels=payload['labels']
180
+ hypothesis_template = "The topic of this text is {}"
181
+ #commented to use just dberta model
182
+ #model =payload['model'] if hasattr(payload, 'model') else "facebook"
183
+ #if model==None:
184
+ # model="dberta"
185
+
186
+ # if model=="facebook":
187
+ # # nlp = classifier
188
+ # nlp = pipeline('zero-shot-classification', model=topicmodel_Facebook, tokenizer=topictokenizer_Facebook, device=gpu)
189
+ # elif model=="dberta":
190
+ # # nlp = classifier2
191
+ # nlp = pipeline('zero-shot-classification', model=topicmodel_dberta, tokenizer=topictokenizer_dberta,device=gpu)
192
+ nlp = pipeline('zero-shot-classification', model=topicmodel_dberta, tokenizer=topictokenizer_dberta,device=gpu)
193
+ with torch.no_grad():
194
+ output=nlp(text, labels,hypothesis_template=hypothesis_template,multi_label=True)
195
+
196
+ for i in range(len(output["scores"])):
197
+ output["scores"][i] = round(output["scores"][i],4)
198
+
199
+ del nlp
200
+ et = time.time()
201
+ rt = et-st
202
+ output['time_taken'] = str(round(rt,3))+"s"
203
+ # output1['restricttopic'] = output
204
+ return output
205
+
206
+ except Exception as e:
207
+
208
+ log.error("Error occured in restricttopic_check")
209
+ log.error(f"Exception: {str(traceback.extract_tb(e.__traceback__)[0].lineno),e}")
210
+ log_dict[request_id_var.get()].append({"Line number":str(traceback.extract_tb(e.__traceback__)[0].lineno),"Error":str(e),
211
+ "Error Module":"Failed at restricttopic_check call"})
212
+ raise InternalServerError()
213
+
214
+ def toxicity_check(payload,id) :
215
+ log.info("inside toxicity_check")
216
+
217
+ try:
218
+ st = time.time()
219
+ text = payload['text']
220
+
221
+ #to check number of tokens
222
+ input_ids_val = tokenizer.encode(text)
223
+ input_ids=input_ids_val[1:-1]
224
+ result_list=[]
225
+ #to send max 510 tokens to the model at a time and at end find avg result for each token set
226
+ if len(input_ids)>510:
227
+ val=math.ceil(len(input_ids)/510)
228
+ j=0
229
+ k=510
230
+ for i in range(0,val):
231
+ text="".join(tokenizer.decode(input_ids[j:k]))
232
+ j+=510
233
+ k+=510
234
+ with torch.no_grad():
235
+ result = toxicityModel.predict(text)
236
+ result_list.append(result)
237
+ output = {
238
+ 'toxicity': 0,
239
+ 'severe_toxicity': 0,
240
+ 'obscene': 0,
241
+ 'threat': 0,
242
+ 'insult': 0,
243
+ 'identity_attack': 0,
244
+ 'sexual_explicit': 0
245
+ }
246
+ for j in result_list:
247
+ output['toxicity']+=j['toxicity']
248
+ output['severe_toxicity']+=j['severe_toxicity']
249
+ output['obscene']+=j['obscene']
250
+ output['identity_attack']+=j['identity_attack']
251
+ output['insult']+=j['insult']
252
+ output['threat']+=j['threat']
253
+ output['sexual_explicit']+=j['sexual_explicit']
254
+ output = {k: v / len(result_list) for k, v in output.items()}
255
+ else:
256
+ with torch.no_grad():
257
+ output = toxicityModel.predict(text)
258
+ List_profanity_score = []
259
+ obj_profanityScore_toxic = profanityScore(metricName='toxicity',
260
+ metricScore=output['toxicity'])
261
+ obj_profanityScore_severe_toxic = profanityScore(metricName='severe_toxicity',
262
+ metricScore=output['severe_toxicity'])
263
+ obj_profanityScore_obscene = profanityScore(metricName='obscene',
264
+ metricScore=output['obscene'])
265
+ obj_profanityScore_threat = profanityScore(metricName='threat',
266
+ metricScore=output['threat'])
267
+ obj_profanityScore_insult = profanityScore(metricName='insult',
268
+ metricScore=output['insult'])
269
+ obj_profanityScore_identity_attack = profanityScore(metricName='identity_attack',
270
+ metricScore=output['identity_attack'])
271
+ obj_profanityScore_sexual_explicit = profanityScore(metricName='sexual_explicit',
272
+ metricScore=output['sexual_explicit'])
273
+
274
+ List_profanity_score.append(obj_profanityScore_toxic)
275
+ List_profanity_score.append(obj_profanityScore_severe_toxic)
276
+ List_profanity_score.append(obj_profanityScore_obscene)
277
+ List_profanity_score.append(obj_profanityScore_threat)
278
+ List_profanity_score.append(obj_profanityScore_insult)
279
+ List_profanity_score.append(obj_profanityScore_identity_attack)
280
+ List_profanity_score.append(obj_profanityScore_sexual_explicit)
281
+
282
+ objProfanityAnalyzeResponse = {}
283
+ objProfanityAnalyzeResponse['toxicScore'] = List_profanity_score
284
+ et = time.time()
285
+ rt = et-st
286
+ objProfanityAnalyzeResponse['time_taken'] = str(round(rt,3))+"s"
287
+ # output1['toxicity'] = objProfanityAnalyzeResponse
288
+ return objProfanityAnalyzeResponse
289
+
290
+ except Exception as e:
291
+
292
+ log.error("Error occured in toxicity_check")
293
+ log.error(f"Exception: {str(traceback.extract_tb(e.__traceback__)[0].lineno),e}")
294
+ log_dict[request_id_var.get()].append({"Line number":str(traceback.extract_tb(e.__traceback__)[0].lineno),"Error":str(e),
295
+ "Error Module":"Failed at toxicity_check call"})
296
+ raise InternalServerError()
297
+
298
+ def promptInjection_check(text,id):
299
+
300
+ log.info("inside promptInjection_check")
301
+ try:
302
+ st = time.time()
303
+ result = promtModel(text)
304
+ # print("============:",result)
305
+ predicted_label_name = result[0]["label"]
306
+ predicted_probabilities = result[0]["score"]
307
+ # del tokens
308
+ et = time.time()
309
+ rt = et-st
310
+ # output['promptInjection'] = (predicted_label_name,predicted_probabilities, {'time_taken':str(round(rt,3))+"s"})
311
+
312
+ return predicted_label_name,predicted_probabilities, {'time_taken':str(round(rt,3))+"s"}
313
+ except Exception as e:
314
+
315
+ log.error("Error occured in promptInjection_check")
316
+ log.error(f"Exception: {str(traceback.extract_tb(e.__traceback__)[0].lineno),e}")
317
+ log_dict[request_id_var.get()].append({"Line number":str(traceback.extract_tb(e.__traceback__)[0].lineno),"Error":str(e),
318
+ "Error Module":"Failed at promptInjection_check call"})
319
+ raise InternalServerError()
320
+
321
+ # def checkall(id,payload):
322
+ # try:
323
+ # st = time.time()
324
+ # output = {}
325
+ # x=[]
326
+ # threads = []
327
+ # results = []
328
+ # thread = threading.Thread(target=toxicity_check, args=(payload['text'],id,output))
329
+ # threads.append(thread)
330
+ # thread.start()
331
+ # thread1 = threading.Thread(target=promptInjection_check, args=(payload['text']['text'],id,output))
332
+ # threads.append(thread1)
333
+ # thread1.start()
334
+ # thread2 = threading.Thread(target=restricttopic_check, args=(payload["restric"],id,output))
335
+ # threads.append(thread2)
336
+ # thread2.start()
337
+ # thread3 = threading.Thread(target=multi_q_net_embedding, args=(id,payload['embed']["text"],output))
338
+ # threads.append(thread3)
339
+ # thread3.start()
340
+ # thread4 = threading.Thread(target=privacy, args=(id,payload['text']['text'],output))
341
+ # threads.append(thread4)
342
+ # thread4.start()
343
+ # for thread in threads:
344
+ # thread.join()
345
+ # # print("=======================:",result)
346
+ # # results.append(thread.result)
347
+ # # with multiprocessing.Pool() as pool:
348
+ # # output['toxicity'] =pool.starmap(toxicity_check, [(payload["text"],id)])
349
+ # # output['promptInjection'] = pool.starmap(promptInjection_check, [(payload['text'],id)])
350
+ # # output['restricttopic'] = pool.starmap(restricttopic_check, [(payload["restric"],id)])
351
+ # # output['multi_q_net_embedding'] = pool.starmap(multi_q_net_embedding, [(id,payload['embed']["text"])])
352
+ # # output['privacy'] = pool.starmap(privacy, [(id,payload['text'])])
353
+ # # print("output",output)
354
+ # et = time.time()
355
+ # rt = et-st
356
+ # output['time_taken'] = str(round(rt,3))+"s"
357
+ # return output
358
+ # except Exception as e:
359
+
360
+ # log.error("Error occured in checkall")
361
+ # log.error(f"Exception: {str(traceback.extract_tb(e.__traceback__)[0].lineno),e}")
362
+ # log_dict[request_id_var.get()].append({"Line number":str(traceback.extract_tb(e.__traceback__)[0].lineno),"Error":str(e),
363
+ # "Error Module":"Failed at checkall call"})
364
+ # raise InternalServerError()
src/static/swagger.json ADDED
@@ -0,0 +1,710 @@
1
+ {
2
+ "openapi": "3.0.3",
3
+ "info": {
4
+ "title": "Infosys Responsible AI - LLM Moderation Models - OpenAPI 3.0",
5
+ "description": "API specs for Infosys Responsible AI LLM Moderation layer Models in OpenAPI 3.0 format",
6
+ "contact": {
7
+ "email": "[email protected]"
8
+ },
9
+ "license": {
10
+ "name": "Infosys",
11
+ "url": "https://www.infosys.com"
12
+ },
13
+ "version": "0.0.1"
14
+ },
15
+ "security" : [ {
16
+ "oauth_auth" : [ "write:users", "read:users" ]
17
+ }
18
+ ],
19
+ "paths": {
20
+ "/rai/v1/raimoderationmodels/detoxifymodel": {
21
+ "post": {
22
+ "security": [{
23
+ "my_auth": ["write:users"]
24
+ }],
25
+ "tags": [
26
+ "Infosys Responsible AI - LLM Moderation"
27
+ ],
28
+ "summary": "Toxic Model",
29
+ "operationId": "toxic_model_rai_v1_models_detoxifymodel_post",
30
+ "requestBody": {
31
+ "content": {
32
+ "application/json": {
33
+ "schema": {
34
+ "$ref": "#/components/schemas/detoxifyRequest"
35
+ }
36
+ }
37
+ },
38
+ "required": true
39
+ },
40
+ "responses": {
41
+ "200": {
42
+ "description": "Successful Response",
43
+ "content": {
44
+ "application/json": {
45
+ "schema": {
46
+ "$ref": "#/components/schemas/detoxifyResponse"
47
+ }
48
+ }
49
+ }
50
+ },
51
+ "422": {
52
+ "description": "Validation Error",
53
+ "content": {
54
+ "application/json": {
55
+ "schema": {
56
+ "$ref": "#/components/schemas/HTTPValidationError"
57
+ }
58
+ }
59
+ }
60
+ },
61
+ "401":
62
+ {
63
+ "description":"Unauthorized Access Error",
64
+ "content":{
65
+ "application/json":{
66
+ "schema":{
67
+ "$ref":"#/components/schemas/HTTPUnauthorizedAccessError"
68
+ }
69
+ }
70
+ }
71
+ },
72
+ "403":
73
+ {
74
+ "description":"Forbidden Access Error",
75
+ "content":{
76
+ "application/json":{
77
+ "schema":{
78
+ "$ref":"#/components/schemas/HTTPForbiddenAccessError"
79
+ }
80
+ }
81
+ }
82
+ }
83
+ }
84
+ }
85
+ },
86
+ "/rai/v1/raimoderationmodels/privacy": {
87
+ "post": {
88
+ "security": [{
89
+ "my_auth": ["write:users"]
90
+ }],
91
+ "tags": [
92
+ "Infosys Responsible AI - LLM Moderation"
93
+ ],
94
+ "summary": "Pii Check",
95
+ "operationId": "pii_check_rai_v1_models_privacy_post",
96
+ "requestBody": {
97
+ "content": {
98
+ "application/json": {
99
+ "schema": {
100
+ "$ref": "#/components/schemas/privacyRequest"
101
+ }
102
+ }
103
+ },
104
+ "required": true
105
+ },
106
+ "responses": {
107
+ "200": {
108
+ "description": "Successful Response",
109
+ "content": {
110
+ "application/json": {
111
+ "schema": {}
112
+ }
113
+ }
114
+ },
115
+ "422": {
116
+ "description": "Validation Error",
117
+ "content": {
118
+ "application/json": {
119
+ "schema": {
120
+ "$ref": "#/components/schemas/HTTPValidationError"
121
+ }
122
+ }
123
+ }
124
+ },
125
+ "401":
126
+ {
127
+ "description":"Unauthorized Access Error",
128
+ "content":{
129
+ "application/json":{
130
+ "schema":{
131
+ "$ref":"#/components/schemas/HTTPUnauthorizedAccessError"
132
+ }
133
+ }
134
+ }
135
+ },
136
+ "403":
137
+ {
138
+ "description":"Forbidden Access Error",
139
+ "content":{
140
+ "application/json":{
141
+ "schema":{
142
+ "$ref":"#/components/schemas/HTTPForbiddenAccessError"
143
+ }
144
+ }
145
+ }
146
+ }
147
+ }
148
+ }
149
+ },
150
+ "/rai/v1/raimoderationmodels/promptinjectionmodel": {
151
+ "post": {
152
+ "security": [{
153
+ "my_auth": ["write:users"]
154
+ }],
155
+ "tags": [
156
+ "Infosys Responsible AI - LLM Moderation"
157
+ ],
158
+ "summary": "Prompt Model",
159
+ "operationId": "prompt_model_rai_v1_models_promptinjectionmodel_post",
160
+ "requestBody": {
161
+ "content": {
162
+ "application/json": {
163
+ "schema": {
164
+ "$ref": "#/components/schemas/detoxifyRequest"
165
+ }
166
+ }
167
+ },
168
+ "required": true
169
+ },
170
+ "responses": {
171
+ "200": {
172
+ "description": "Successful Response",
173
+ "content": {
174
+ "application/json": {
175
+ "schema": {}
176
+ }
177
+ }
178
+ },
179
+ "422": {
180
+ "description": "Validation Error",
181
+ "content": {
182
+ "application/json": {
183
+ "schema": {
184
+ "$ref": "#/components/schemas/HTTPValidationError"
185
+ }
186
+ }
187
+ }
188
+ },
189
+ "401":
190
+ {
191
+ "description":"Unauthorized Access Error",
192
+ "content":{
193
+ "application/json":{
194
+ "schema":{
195
+ "$ref":"#/components/schemas/HTTPUnauthorizedAccessError"
196
+ }
197
+ }
198
+ }
199
+ },
200
+ "403":
201
+ {
202
+ "description":"Forbidden Access Error",
203
+ "content":{
204
+ "application/json":{
205
+ "schema":{
206
+ "$ref":"#/components/schemas/HTTPForbiddenAccessError"
207
+ }
208
+ }
209
+ }
210
+ }
211
+ }
212
+ }
213
+ },
214
+ "/rai/v1/raimoderationmodels/restrictedtopicmodel": {
215
+ "post": {
216
+ "security": [{
217
+ "my_auth": ["write:users"]
218
+ }],
219
+ "tags": [
220
+ "Infosys Responsible AI - LLM Moderation"
221
+ ],
222
+ "summary": "Restrictedtopic Model",
223
+ "operationId": "restrictedTopic_model_rai_v1_models_restrictedtopicmodel_post",
224
+ "requestBody": {
225
+ "content": {
226
+ "application/json": {
227
+ "schema": {
228
+ "$ref": "#/components/schemas/RestrictedTopicRequest"
229
+ }
230
+ }
231
+ },
232
+ "required": true
233
+ },
234
+ "responses": {
235
+ "200": {
236
+ "description": "Successful Response",
237
+ "content": {
238
+ "application/json": {
239
+ "schema": {}
240
+ }
241
+ }
242
+ },
243
+ "422": {
244
+ "description": "Validation Error",
245
+ "content": {
246
+ "application/json": {
247
+ "schema": {
248
+ "$ref": "#/components/schemas/HTTPValidationError"
249
+ }
250
+ }
251
+ }
252
+ },
253
+ "401":
254
+ {
255
+ "description":"Unauthorized Access Error",
256
+ "content":{
257
+ "application/json":{
258
+ "schema":{
259
+ "$ref":"#/components/schemas/HTTPUnauthorizedAccessError"
260
+ }
261
+ }
262
+ }
263
+ },
264
+ "403":
265
+ {
266
+ "description":"Forbidden Access Error",
267
+ "content":{
268
+ "application/json":{
269
+ "schema":{
270
+ "$ref":"#/components/schemas/HTTPForbiddenAccessError"
271
+ }
272
+ }
273
+ }
274
+ }
275
+ }
276
+ }
277
+ },
278
+ "/rai/v1/raimoderationmodels/multi_q_net_embedding": {
279
+ "post": {
280
+ "security": [{
281
+ "my_auth": ["write:users"]
282
+ }],
283
+ "tags": [
284
+ "Infosys Responsible AI - LLM Moderation"
285
+ ],
286
+ "summary": "Embedding Model",
287
+ "operationId": "embedding_model_rai_v1_models_multi_q_net_embedding_post",
288
+ "requestBody": {
289
+ "content": {
290
+ "application/json": {
291
+ "schema": {
292
+ "$ref": "#/components/schemas/JailbreakRequest"
293
+ }
294
+ }
295
+ },
296
+ "required": true
297
+ },
298
+ "responses": {
299
+ "200": {
300
+ "description": "Successful Response",
301
+ "content": {
302
+ "application/json": {
303
+ "schema": {}
304
+ }
305
+ }
306
+ },
307
+ "422": {
308
+ "description": "Validation Error",
309
+ "content": {
310
+ "application/json": {
311
+ "schema": {
312
+ "$ref": "#/components/schemas/HTTPValidationError"
313
+ }
314
+ }
315
+ }
316
+ },
317
+ "401":
318
+ {
319
+ "description":"Unauthorized Access Error",
320
+ "content":{
321
+ "application/json":{
322
+ "schema":{
323
+ "$ref":"#/components/schemas/HTTPUnauthorizedAccessError"
324
+ }
325
+ }
326
+ }
327
+ },
328
+ "403":
329
+ {
330
+ "description":"Forbidden Access Error",
331
+ "content":{
332
+ "application/json":{
333
+ "schema":{
334
+ "$ref":"#/components/schemas/HTTPForbiddenAccessError"
335
+ }
336
+ }
337
+ }
338
+ }
339
+ }
340
+ }
341
+ },
342
+ "/rai/v1/raimoderationmodels/multi-qa-mpnet-model_similarity": {
343
+ "post": {
344
+ "security": [{
345
+ "my_auth": ["write:users"]
346
+ }],
347
+ "tags": [
348
+ "Infosys Responsible AI - LLM Moderation"
349
+ ],
350
+ "summary": "Similarity Model",
351
+ "operationId": "similarity_model_rai_v1_models_multi_qa_mpnet_model_similarity_post",
352
+ "parameters": [
353
+ {
354
+ "required": false,
355
+ "schema": {
356
+ "type": "string",
357
+ "title": "Authorization"
358
+ },
359
+ "name": "authorization",
360
+ "in": "header"
361
+ }
362
+ ],
363
+ "requestBody": {
364
+ "content": {
365
+ "application/json": {
366
+ "schema": {
367
+ "$ref": "#/components/schemas/SimilarityRequest"
368
+ }
369
+ }
370
+ },
371
+ "required": true
372
+ },
373
+ "responses": {
374
+ "200": {
375
+ "description": "Successful Response",
376
+ "content": {
377
+ "application/json": {
378
+ "schema": {}
379
+ }
380
+ }
381
+ },
382
+ "422": {
383
+ "description": "Validation Error",
384
+ "content": {
385
+ "application/json": {
386
+ "schema": {
387
+ "$ref": "#/components/schemas/HTTPValidationError"
388
+ }
389
+ }
390
+ }
391
+ },
392
+ "401":
393
+ {
394
+ "description":"Unauthorized Access Error",
395
+ "content":{
396
+ "application/json":{
397
+ "schema":{
398
+ "$ref":"#/components/schemas/HTTPUnauthorizedAccessError"
399
+ }
400
+ }
401
+ }
402
+ },
403
+ "403":
404
+ {
405
+ "description":"Forbidden Access Error",
406
+ "content":{
407
+ "application/json":{
408
+ "schema":{
409
+ "$ref":"#/components/schemas/HTTPForbiddenAccessError"
410
+ }
411
+ }
412
+ }
413
+ }
414
+ }
415
+ }
416
+ }
417
+ },
418
+ "components": {
419
+ "schemas": {
420
+ "HTTPValidationError": {
421
+ "properties": {
422
+ "detail": {
423
+ "items": {
424
+ "$ref": "#/components/schemas/ValidationError"
425
+ },
426
+ "type": "array",
427
+ "title": "Detail"
428
+ }
429
+ },
430
+ "type": "object",
431
+ "title": "HTTPValidationError"
432
+ },
433
+ "HTTPUnauthorizedAccessError":{
434
+ "properties":{
435
+ "detail":{
436
+ "items":{
437
+ "$ref":"#/components/schemas/UnauthorizedAccessError"
438
+ },
439
+ "type":"array",
440
+ "title":"Detail"
441
+ }
442
+ },
443
+ "type":"object",
444
+ "title":"HTTPUnauthorizedAccessError"
445
+ },
446
+ "HTTPForbiddenAccessError":{
447
+ "properties":{
448
+ "detail":{
449
+ "items":{
450
+ "$ref":"#/components/schemas/ForbiddenAccessError"
451
+ },
452
+ "type":"array",
453
+ "title":"Detail"
454
+ }
455
+ },
456
+ "type":"object",
457
+ "title":"HTTPForbiddenAccessError"
458
+ },
459
+ "JailbreakRequest": {
460
+ "properties": {
461
+ "text": {
462
+ "items": {
463
+ "type": "string"
464
+ },
465
+ "type": "array",
466
+ "title": "Text"
467
+ }
468
+ },
469
+ "type": "object",
470
+ "required": [
471
+ "text"
472
+ ],
473
+ "title": "JailbreakRequest"
474
+ },
475
+ "RestrictedTopicRequest": {
476
+ "properties": {
477
+ "text": {
478
+ "type": "string",
479
+ "title": "Text",
480
+ "example": "Russia is the biggest country by area."
481
+ },
482
+ "labels": {
483
+ "items": {},
484
+ "type": "array",
485
+ "title": "Labels",
486
+ "default": [
487
+ "Terrorism",
488
+ "Explosives"
489
+ ]
490
+ }
491
+ },
492
+ "type": "object",
493
+ "required": [
494
+ "text"
495
+ ],
496
+ "title": "RestrictedTopicRequest"
497
+ },
498
+ "SimilarityRequest": {
499
+ "properties": {
500
+ "text1": {
501
+ "type": "string",
502
+ "title": "Text1",
503
+ "example": "Russia is the biggest country by area."
504
+ },
505
+ "text2": {
506
+ "type": "string",
507
+ "title": "Text2",
508
+ "example": "Russia is the biggest country by area."
509
+ },
510
+ "emb1": {
511
+ "items": {},
512
+ "type": "array",
513
+ "title": "Emb1"
514
+ },
515
+ "emb2": {
516
+ "items": {},
517
+ "type": "array",
518
+ "title": "Emb2"
519
+ }
520
+ },
521
+ "type": "object",
522
+ "title": "SimilarityRequest"
523
+ },
524
+ "ValidationError": {
525
+ "properties": {
526
+ "loc": {
527
+ "items": {
528
+ "anyOf": [
529
+ {
530
+ "type": "string"
531
+ },
532
+ {
533
+ "type": "integer"
534
+ }
535
+ ]
536
+ },
537
+ "type": "array",
538
+ "title": "Location"
539
+ },
540
+ "msg": {
541
+ "type": "string",
542
+ "title": "Message"
543
+ },
544
+ "type": {
545
+ "type": "string",
546
+ "title": "Error Type"
547
+ }
548
+ },
549
+ "type": "object",
550
+ "required": [
551
+ "loc",
552
+ "msg",
553
+ "type"
554
+ ],
555
+ "title": "ValidationError"
556
+ },
557
+ "UnauthorizedAccessError":{
558
+ "properties": {
559
+ "loc": {
560
+ "items": {
561
+ "anyOf": [
562
+ {
563
+ "type": "string"
564
+ },
565
+ {
566
+ "type": "integer"
567
+ }
568
+ ]
569
+ },
570
+ "type": "array",
571
+ "title": "Location"
572
+ },
573
+ "msg": {
574
+ "type": "string",
575
+ "title": "Message"
576
+ },
577
+ "type": {
578
+ "type": "string",
579
+ "title": "Error Type"
580
+ }
581
+ },
582
+ "type": "object",
583
+ "required": [
584
+ "loc",
585
+ "msg",
586
+ "type"
587
+ ],
588
+ "title": "UnauthorizedAccessError"
589
+ },
590
+ "ForbiddenAccessError":{
591
+ "properties": {
592
+ "loc": {
593
+ "items": {
594
+ "anyOf": [
595
+ {
596
+ "type": "string"
597
+ },
598
+ {
599
+ "type": "integer"
600
+ }
601
+ ]
602
+ },
603
+ "type": "array",
604
+ "title": "Location"
605
+ },
606
+ "msg": {
607
+ "type": "string",
608
+ "title": "Message"
609
+ },
610
+ "type": {
611
+ "type": "string",
612
+ "title": "Error Type"
613
+ }
614
+ },
615
+ "type": "object",
616
+ "required": [
617
+ "loc",
618
+ "msg",
619
+ "type"
620
+ ],
621
+ "title": "ForbiddenAccessError"
622
+ },
623
+ "detoxifyRequest": {
624
+ "properties": {
625
+ "text": {
626
+ "type": "string",
627
+ "title": "Text",
628
+ "example": "Russia is the biggest country by area."
629
+ }
630
+ },
631
+ "type": "object",
632
+ "required": [
633
+ "text"
634
+ ],
635
+ "title": "detoxifyRequest"
636
+ },
637
+ "detoxifyResponse": {
638
+ "properties": {
639
+ "toxicScore": {
640
+ "items": {
641
+ "$ref": "#/components/schemas/profanityScore"
642
+ },
643
+ "type": "array",
644
+ "title": "Toxicscore"
645
+ }
646
+ },
647
+ "type": "object",
648
+ "required": [
649
+ "toxicScore"
650
+ ],
651
+ "title": "detoxifyResponse"
652
+ },
653
+ "privacyRequest": {
654
+ "properties": {
655
+ "text": {
656
+ "type": "string",
657
+ "title": "Text",
658
+ "example": "Russia is the biggest country by area."
659
+ }
660
+ },
661
+ "type": "object",
662
+ "required": [
663
+ "text"
664
+ ],
665
+ "title": "privacyRequest"
666
+ },
667
+ "profanityScore": {
668
+ "properties": {
669
+ "metricName": {
670
+ "type": "string",
671
+ "title": "Metricname",
672
+ "example": "toxicity"
673
+ },
674
+ "metricScore": {
675
+ "type": "number",
676
+ "title": "Metricscore",
677
+ "example": 0.78326
678
+ }
679
+ },
680
+ "type": "object",
681
+ "required": [
682
+ "metricName",
683
+ "metricScore"
684
+ ],
685
+ "title": "profanityScore"
686
+ }
687
+ },
688
+ "securitySchemes": {
689
+ "type": "oauth2",
690
+ "flows": {
691
+ "authorizationCode": {
692
+ "authorizationUrl": "http://tes.org/api/oauth/dialog",
693
+ "scopes": {
694
+ "read:users": "read user profiles"
695
+ }
696
+ }
697
+ }
+ }
698
+ }
699
+ },
700
+ "tags": [
701
+ {
702
+ "name": "LLM Moderation",
703
+ "description": "Operations required for LLM moderation proxy.",
704
+ "externalDocs": {
705
+ "description": "Find out more",
706
+ "url": "https://www.infosys.com"
707
+ }
708
+ }
709
+ ]
710
+ }