"""Discord bot that forwards chat prompts to a Gradio Space via gradio_client."""

import asyncio
import os
import threading
from threading import Event
from typing import Optional

import discord
import gradio as gr  # kept: imported by the original file
import gradio_client as grc
from discord import Permissions
from discord.ext import commands
from discord.utils import oauth_url
from gradio_client.utils import QueueError

# Set once the bot has finished starting up (or failed to start for lack of a token).
event = Event()

DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")


async def wait(job):
    """Poll a gradio_client Job until completion without blocking the event loop."""
    while not job.done():
        await asyncio.sleep(0.2)


## GRADIO UI API LINK ##
def get_client(session: Optional[str] = None) -> grc.Client:
    """Build a gradio_client.Client for the hosted Space.

    session, when given, pins the client to an existing queue session hash.
    """
    client = grc.Client(
        "https://cryptoscoutv1-discord-chatbot-csv2.hf.space",
        hf_token=os.getenv("HF_TOKEN"),
    )
    if session:
        client.session_hash = session
    return client


def truncate_response(response: str) -> str:
    """Clamp text to Discord's 2000-character message limit.

    Over-long text is cut so that, together with the appended notice,
    the result fits in exactly 2000 characters.
    """
    ending = "...\nTruncating response to 2000 characters due to discord api limits."
    if len(response) > 2000:
        return response[: 2000 - len(ending)] + ending
    return response


intents = discord.Intents.default()
intents.message_content = True
bot = commands.Bot(command_prefix="/", intents=intents)


## BOT COMMANDS ##
@bot.event
async def on_ready():
    """Sync slash commands, signal readiness, and print an OAuth invite URL."""
    print(f"Logged in as {bot.user} (ID: {bot.user.id})")
    synced = await bot.tree.sync()
    print(f"Synced commands: {', '.join([s.name for s in synced])}.")
    event.set()
    print("------")
    # Generate OAuth URL when the bot is ready
    permissions = Permissions(326417525824)
    url = oauth_url(bot.user.id, permissions=permissions)
    print(f"Add this bot to your server by clicking this link: {url}")


thread_to_client = {}
thread_to_user = {}


## /cryptosearch command to return the llm response ##
@bot.hybrid_command(
    name="cryptosearch",
    # Rejoined: the original description string was split by a stray newline.
    description="Enter some text to chat with the bot! Like this: /cryptosearch Hello, how are you?",
)
async def chat(ctx, prompt: str):
    """Forward *prompt* to the Gradio Space and reply with the model output."""
    if ctx.author.id == bot.user.id:
        return
    try:
        # Acknowledge the command immediately - This is the discord message after the command / is called ##
        await ctx.send(f"Processing: {prompt}")
        loop = asyncio.get_running_loop()
        # Client construction performs blocking network I/O; keep it off the event loop.
        client = await loop.run_in_executor(None, get_client, None)
        job = client.submit(prompt, api_name="/predict")  ## /predict can be used for gradio interface ##
        await wait(job)
        try:
            job.result()  # re-raises any error the Space produced (e.g. QueueError)
            outputs = job.outputs()
            # Guard: outputs() may be empty, in which case [-1] would raise IndexError.
            if not outputs:
                await ctx.send("The gradio space powering this bot is really busy! Please try again later!")
                return
            response = outputs[-1]
            await ctx.send(f"```{truncate_response(response)}```")  # Send the LLM response
        except QueueError:
            await ctx.send(
                "The gradio space powering this bot is really busy! Please try again later!"
            )
    except Exception as e:
        # Last-resort guard: log with context and keep the bot alive.
        print(f"cryptosearch command failed: {e}")


@bot.hybrid_command(name="cshelp", description="Get information on how to use this bot.")
async def help_command(ctx):
    """Send usage instructions for the bot."""
    # Fixed: the original text referred to a non-existent /echo command.
    help_message = """
**How to Use This Bot**
- Use `/cryptosearch your_message` to interact with the bot.
- Your message will be processed, and you will get a response.
- If you need any assistance, contact the server admins.
"""
    await ctx.send(help_message)


@bot.hybrid_command(name="sentences", description="Choose a predefined sentence.")
async def sentences_command(ctx):
    """Send each predefined (sentence, value) pair as its own message."""
    sentences_list = [
        ("sentence1", "value1"),
        ("sentence2", "value2"),
        ("sentence3", "value3"),
        # Add more sentences and values as needed
    ]
    for sentence, value in sentences_list:
        await ctx.send(f"{sentence} - {value}")


@bot.event
async def on_message(message):
    """Route prefixed messages to the command handler; ignore other bots."""
    if message.author.bot:
        return
    if message.content.startswith(bot.command_prefix):
        await bot.process_commands(message)


def run_bot():
    """Run the Discord client, or unblock waiters when no token is configured."""
    if not DISCORD_TOKEN:
        print("DISCORD_TOKEN NOT SET")
        event.set()
    else:
        bot.run(DISCORD_TOKEN)


threading.Thread(target=run_bot).start()