import gradio as gr
import cv2
import requests
import os
from ultralyticsplus import YOLO, render_result
# Model Heading and Description
model_heading = "CandleStickScan: Pattern Recognition for Trading Success"
description = """ π―οΈ Introducing CandleScan by Foduu AI π―οΈ
Unleash the power of precise pattern recognition with CandleScan, your ultimate companion for deciphering intricate candlestick formations in the world of trading. ππ
Unlock the secrets of successful trading by effortlessly identifying crucial candlestick patterns such as 'Head and Shoulders Bottom', 'Head and Shoulders Top', 'M-Head', 'StockLine', 'Triangle', and 'W-Bottom'. ππ
Powered by the cutting-edge technology of Foduu AI, CandleScan is your expert guide to navigating the complexities of the market. Whether you're an experienced trader or a novice investor, our app empowers you to make informed decisions with confidence. πΌπ°
But that's not all! CandleScan is just the beginning. If you're hungry for more pattern recognition prowess, simply reach out to us at [email protected]. Our dedicated team is ready to assist you in expanding your trading horizons by integrating additional pattern recognition features. π¬π²
Show your appreciation for this space-age tool by hitting the 'Like' button and start embarking on a journey towards trading mastery with CandleScan! ππ―οΈπ
π§ Contact us: [email protected]
π Like | """
def download_file(url, save_name):
    # Download the file only if it is not already present locally
    if not os.path.exists(save_name):
        file = requests.get(url)
        open(save_name, 'wb').write(file.content)
# Download files
file_urls = [
'https://huggingface.co/spaces/foduucom/CandleStickScan-Stock-trading-yolov8/resolve/main/test/-2022-06-28-12-35-50_png.rf.8dee4bb645ea8b5036721b830d2636b1.jpg',
'https://huggingface.co/spaces/foduucom/CandleStickScan-Stock-trading-yolov8/resolve/main/test/-2022-06-28-12-45-10_png.rf.8b9177546e62a2422ad603b16f1f50b9.jpg',
'https://www.dropbox.com/s/7sjfwncffg8xej2/video_7.mp4?dl=1'
]
# Fetch the sample video and sample images so they can be served as examples
for i, url in enumerate(file_urls):
    if 'mp4' in url:
        download_file(url, "video.mp4")
    else:
        download_file(url, f"image_{i}.jpg")
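# The interfaces below pass `path` and `video_path` as Gradio example lists.
# They are assumed here to point at the files downloaded above.
path = [['image_0.jpg'], ['image_1.jpg']]
video_path = [['video.mp4']]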
# Load YOLO model
model = YOLO('foduucom/stockmarket-pattern-detection-yolov8')
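# The model detects the chart patterns named in the description above:
# 'Head and Shoulders Bottom', 'Head and Shoulders Top', 'M-Head',
# 'StockLine', 'Triangle', and 'W-Bottom'.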
# Image Inference
def show_preds_image(image_path):
    # Read the image for drawing and run YOLO prediction on the same file
    image = cv2.imread(image_path)
    outputs = model.predict(source=image_path)
    results = outputs[0].cpu().numpy()
    # Draw a red bounding box around every detected pattern
    for det in results.boxes.xyxy:
        cv2.rectangle(
            image,
            (int(det[0]), int(det[1])),
            (int(det[2]), int(det[3])),
            color=(0, 0, 255),
            thickness=2,
            lineType=cv2.LINE_AA
        )
    # OpenCV uses BGR; convert to RGB for display in Gradio
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
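# Optional alternative: `render_result` from ultralyticsplus (imported above)
# can draw the model's own annotations instead of the manual cv2.rectangle loop.
# A minimal sketch, not wired into the interface; gr.Image outputs also accept
# PIL images, which is what render_result returns.
def show_preds_image_rendered(image_path):
    outputs = model.predict(source=image_path)
    # Returns a PIL image with boxes, labels, and confidences drawn
    return render_result(model=model, image=image_path, result=outputs[0])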
inputs_image = [
gr.components.Image(type="filepath", label="Input Image"),
]
outputs_image = [
gr.components.Image(type="numpy", label="Output Image"),
]
interface_image = gr.Interface(
    fn=show_preds_image,
    inputs=inputs_image,
    outputs=outputs_image,
    title=model_heading,
    description=description,
    examples=path,
    cache_examples=False,
)
# Video Inference
def show_preds_video(video_path):
    cap = cv2.VideoCapture(video_path)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # Stop once the video has no more frames
            break
        frame_copy = frame.copy()
        outputs = model.predict(source=frame)
        results = outputs[0].cpu().numpy()
        # Draw a red bounding box around every detected pattern
        for det in results.boxes.xyxy:
            cv2.rectangle(
                frame_copy,
                (int(det[0]), int(det[1])),
                (int(det[2]), int(det[3])),
                color=(0, 0, 255),
                thickness=2,
                lineType=cv2.LINE_AA
            )
        # Yield annotated frames one by one; with .queue() enabled below,
        # Gradio streams each frame to the output image component
        yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
    cap.release()
inputs_video = [
gr.components.Video(type="filepath", label="Input Video"),
]
outputs_video = [
gr.components.Image(type="numpy", label="Output Image"),
]
interface_video = gr.Interface(
    fn=show_preds_video,
    inputs=inputs_video,
    outputs=outputs_video,
    title=model_heading,
    description=description,
    examples=video_path,
    cache_examples=False,
)
gr.TabbedInterface(
[interface_image, interface_video],
tab_names=['Image inference', 'Video inference']
).queue().launch()