Aekanun committed on
Commit 2a60823 · 1 Parent(s): b82c4ac
Files changed (3):
  1. app.py +105 -52
  2. app.py.origi +0 -41
  3. app.py.origi1 +133 -0
app.py CHANGED
@@ -12,18 +12,21 @@ try:
 except Exception as e:
     print(f"Failed to install packages: {e}")

- # [Fix 1] Move the environment variables before the imports
+
+ ###
+ # Add this line at the top of the code, before importing torch
 import os
 os.environ['NVIDIA_VISIBLE_DEVICES'] = ''
+ ###

 import warnings
 import torch
+
+ # Change 1: add the dynamo settings before importing unsloth
 torch._dynamo.config.suppress_errors = True
 torch._dynamo.config.verbose = False

- # [Fix 2] Move the imports to module level
- from unsloth import FastVisionModel
- from transformers import AutoModelForVision2Seq
+
 from transformers import TextStreamer
 import gradio as gr
 from huggingface_hub import login
@@ -31,55 +34,92 @@ from PIL import Image

 warnings.filterwarnings('ignore')

+ model = None
+ tokenizer = None
+
 if 'HUGGING_FACE_HUB_TOKEN' in os.environ:
     print("Logging in to Hugging Face Hub...")
     login(token=os.environ['HUGGING_FACE_HUB_TOKEN'])
 else:
     print("Warning: HUGGING_FACE_HUB_TOKEN not found")

- # [Fix 3] Add the @spaces.GPU decorator
+ # @spaces.GPU
+ # def load_model():
+ #     global model, tokenizer
+ #     print("Loading model...")
+ #     try:
+ #         from unsloth import FastVisionModel
+ #         # Load the base model and tokenizer the plain way
+ #         base_model, tokenizer = FastVisionModel.from_pretrained(
+ #             "unsloth/Llama-3.2-11B-Vision-Instruct"
+ #         )
+
+ #         print("Base model and tokenizer loaded successfully")
+
+ #         # Load the fine-tuned model the plain way
+ #         from transformers import AutoModelForVision2Seq
+ #         model = AutoModelForVision2Seq.from_pretrained(
+ #             "Aekanun/Llama-3.2-11B-Vision-Instruct-XRay"
+ #         ).to('cuda')
+
+ #         print("Model loaded successfully!")
+ #         return True
+ #     except Exception as e:
+ #         print(f"Error loading model: {str(e)}")
+ #         import traceback
+ #         traceback.print_exc()  # also print the stack trace
+ #         return False
+
 @spaces.GPU
- def model_context():
-     _tokenizer = None
-     _model = None
-
-     def init_models():
-         nonlocal _tokenizer, _model
-         try:
-             print("Loading tokenizer...")
-             # [Fix 4] Remove the imports from the function
-             base_model, _tokenizer = FastVisionModel.from_pretrained(
-                 "unsloth/Llama-3.2-11B-Vision-Instruct",
-                 use_gradient_checkpointing = "unsloth"
-             )
-             print("Tokenizer loaded successfully")
-
-             print("Loading the fine-tuned model...")
-             # [Fix 5] Remove the import from the function
-             _model = AutoModelForVision2Seq.from_pretrained(
-                 "Aekanun/Llama-3.2-11B-Vision-Instruct-XRay",
-                 load_in_4bit=True,
-                 torch_dtype=torch.float16
-             ).to('cuda')
-             FastVisionModel.for_inference(_model)
-             print("Model loaded successfully!")
-             return True
-         except Exception as e:
-             print(f"Error loading model: {str(e)}")
-             return False
-
-     def decorator(func):
-         def wrapper(*args, **kwargs):
-             return func(_model, _tokenizer, *args, **kwargs)
-         return wrapper
-
-     return init_models, decorator
-
- init_models, model_decorator = model_context()
-
- @model_decorator
+ def load_model():
+     global model, tokenizer
+     print("Loading model...")
+     try:
+         # Load the tokenizer from the base model
+         from unsloth import FastVisionModel
+         from transformers import AutoTokenizer
+         print("Loading tokenizer...")
+         base_model, _tokenizer = FastVisionModel.from_pretrained(
+             "unsloth/Llama-3.2-11B-Vision-Instruct",
+             use_gradient_checkpointing = "unsloth"
+         )
+
+         tokenizer = _tokenizer  # assign directly to the global variable
+         print(f"2. Tokenizer type: {type(tokenizer)}")
+         print(f"3. Methods available on the tokenizer: {dir(tokenizer)}")
+         print("4. Global tokenizer after assignment:", type(tokenizer))  # check the value
+
+         print("Base model and tokenizer loaded successfully; loading the fine-tuned model...")
+
+         # Load the fine-tuned model
+         from transformers import AutoModelForVision2Seq
+         print("Loading the fine-tuned model...")
+         model = AutoModelForVision2Seq.from_pretrained(
+             "Aekanun/Llama-3.2-11B-Vision-Instruct-XRay",
+             load_in_4bit=True,
+             torch_dtype=torch.float16
+         ).to('cuda')
+
+         FastVisionModel.for_inference(model)
+         print("Model loaded successfully!")
+         return True
+
+     except Exception as e:
+         print(f"Error loading model: {str(e)}")
+         import traceback
+         traceback.print_exc()
+         return False
+
 @spaces.GPU(duration=30)
- def process_image(_model, _tokenizer, image):
+ def process_image(image):
+     global model, tokenizer
+
+     print("Type of model:", type(model))
+     print("\nIn process_image():")
+     print("A. Type of tokenizer:", type(tokenizer))
+     if tokenizer is not None:
+         print("B. Available methods:", dir(tokenizer))
+
     if image is None:
         return "Please upload an image"

@@ -87,6 +127,7 @@ def process_image(_model, _tokenizer, image):
         if not isinstance(image, Image.Image):
             image = Image.fromarray(image)

+         print("0. Image info:", type(image), image.size)  # debug: image info
         instruction = "You are an expert radiographer. Describe accurately what you see in this image."
         messages = [
             {"role": "user", "content": [
@@ -95,17 +136,29 @@ def process_image(_model, _tokenizer, image):
             ]}
         ]

-         input_text = _tokenizer.apply_chat_template(messages, add_generation_prompt=True)
-         inputs = _tokenizer(
+         # input_text = tokenizer.apply_chat_template(messages, add_generation_prompt=True)
+         # inputs = tokenizer(
+         #     image,
+         #     input_text,
+         #     add_special_tokens=False,
+         #     return_tensors="pt",
+         # ).to("cuda")
+         print("1. Messages:", messages)
+
+         print("2. Tokenizer type:", type(tokenizer))
+         input_text = tokenizer.apply_chat_template(messages, add_generation_prompt=True)
+         print("3. Chat template success:", input_text[:100])
+         inputs = tokenizer(
             image,
             input_text,
             add_special_tokens=False,
             return_tensors="pt",
         ).to("cuda")
+         print("3. Tokenizer inputs:", inputs.keys())  # Debug 3

-         text_streamer = TextStreamer(_tokenizer, skip_prompt=True)
-         outputs = _model.generate(
-             **inputs,
+         text_streamer = TextStreamer(tokenizer, skip_prompt=True)
+         outputs = model.generate(
+             **inputs,
             streamer=text_streamer,
             max_new_tokens=128,
             use_cache=True,
@@ -113,13 +166,13 @@ def process_image(_model, _tokenizer, image):
             min_p=0.1
         )

-         return _tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
+         return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()

     except Exception as e:
         return f"Error: {str(e)}"

 print("Starting the application...")
- if init_models():
+ if load_model():
     demo = gr.Interface(
         fn=process_image,
         inputs=gr.Image(type="pil"),
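
Note on the change above: app.py drops the closure-based `model_context` factory and instead keeps `model` and `tokenizer` as module-level globals that a single `@spaces.GPU`-decorated `load_model()` populates at startup, so `process_image()` takes only the Gradio input. A minimal sketch of that pattern, assuming the `spaces`, `gradio`, and `transformers` packages are installed; `AutoProcessor` stands in here for the unsloth-loaded tokenizer used in the actual commit, and the prompt text is illustrative:

import spaces
import gradio as gr
from transformers import AutoProcessor, AutoModelForVision2Seq

# Module-level globals: filled in once by load_model(), read by process_image().
model = None
processor = None

@spaces.GPU
def load_model():
    global model, processor
    processor = AutoProcessor.from_pretrained("unsloth/Llama-3.2-11B-Vision-Instruct")
    model = AutoModelForVision2Seq.from_pretrained(
        "Aekanun/Llama-3.2-11B-Vision-Instruct-XRay"
    ).to("cuda")
    return True

@spaces.GPU(duration=30)
def process_image(image):
    # The handler takes only the Gradio input and reads the globals,
    # instead of receiving the model through a decorator-built wrapper.
    global model, processor
    if image is None:
        return "Please upload an image"
    messages = [{"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this image."},
    ]}]
    prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
    inputs = processor(image, prompt, add_special_tokens=False, return_tensors="pt").to("cuda")
    outputs = model.generate(**inputs, max_new_tokens=128)
    return processor.decode(outputs[0], skip_special_tokens=True)

if load_model():
    demo = gr.Interface(fn=process_image, inputs=gr.Image(type="pil"), outputs=gr.Textbox())
    demo.launch()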
app.py.origi DELETED
@@ -1,41 +0,0 @@
- import os
- import sys
- import subprocess
-
- def install_packages():
-     print("Installing packages...")
-     subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--upgrade', 'pip'])
-     subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'huggingface_hub'])
-     subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'transformers'])
-     subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'torch', '--index-url', 'https://download.pytorch.org/whl/cpu'])
-     subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'gradio'])
-     subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'Pillow'])
-     subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'bitsandbytes'])
-     subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'accelerate'])
-
- if __name__ == "__main__":
-     try:
-         install_packages()
-         print("Package installation completed")
-
-         import gradio as gr
-         import torch
-         from transformers import AutoProcessor
-
-         def process_handwriting(image):
-             if image is None:
-                 return "Please upload an image"
-             return f"System test: Torch version: {torch.__version__}, Transformers installed"
-
-         demo = gr.Interface(
-             fn=process_handwriting,
-             inputs=gr.Image(type="pil", label="Upload an image"),
-             outputs=gr.Textbox(label="Result"),
-             title="Test Installation",
-             description="Test the library installation"
-         )
-
-         demo.launch()
-     except Exception as e:
-         print(f"Error occurred: {str(e)}")
-         raise e
app.py.origi1 ADDED
@@ -0,0 +1,133 @@
+ import spaces
+ import os
+ import sys
+ import subprocess
+
+ def install_packages():
+     subprocess.check_call([sys.executable, "-m", "pip", "install", "unsloth-zoo"])
+     subprocess.check_call([sys.executable, "-m", "pip", "install", "--no-deps", "git+https://github.com/unslothai/unsloth.git"])
+
+ try:
+     install_packages()
+ except Exception as e:
+     print(f"Failed to install packages: {e}")
+
+ # [Fix 1] Move the environment variables before the imports
+ import os
+ os.environ['NVIDIA_VISIBLE_DEVICES'] = ''
+
+ import warnings
+ import torch
+ torch._dynamo.config.suppress_errors = True
+ torch._dynamo.config.verbose = False
+
+ # [Fix 2] Move the imports to module level
+ from unsloth import FastVisionModel
+ from transformers import AutoModelForVision2Seq
+ from transformers import TextStreamer
+ import gradio as gr
+ from huggingface_hub import login
+ from PIL import Image
+
+ warnings.filterwarnings('ignore')
+
+ if 'HUGGING_FACE_HUB_TOKEN' in os.environ:
+     print("Logging in to Hugging Face Hub...")
+     login(token=os.environ['HUGGING_FACE_HUB_TOKEN'])
+ else:
+     print("Warning: HUGGING_FACE_HUB_TOKEN not found")
+
+ # [Fix 3] Add the @spaces.GPU decorator
+ @spaces.GPU
+ def model_context():
+     _tokenizer = None
+     _model = None
+
+     def init_models():
+         nonlocal _tokenizer, _model
+         try:
+             print("Loading tokenizer...")
+             # [Fix 4] Remove the imports from the function
+             base_model, _tokenizer = FastVisionModel.from_pretrained(
+                 "unsloth/Llama-3.2-11B-Vision-Instruct",
+                 use_gradient_checkpointing = "unsloth"
+             )
+             print("Tokenizer loaded successfully")
+
+             print("Loading the fine-tuned model...")
+             # [Fix 5] Remove the import from the function
+             _model = AutoModelForVision2Seq.from_pretrained(
+                 "Aekanun/Llama-3.2-11B-Vision-Instruct-XRay",
+                 load_in_4bit=True,
+                 torch_dtype=torch.float16
+             ).to('cuda')
+             FastVisionModel.for_inference(_model)
+             print("Model loaded successfully!")
+             return True
+         except Exception as e:
+             print(f"Error loading model: {str(e)}")
+             return False
+
+     def decorator(func):
+         def wrapper(*args, **kwargs):
+             return func(_model, _tokenizer, *args, **kwargs)
+         return wrapper
+
+     return init_models, decorator
+
+ init_models, model_decorator = model_context()
+
+ @model_decorator
+ @spaces.GPU(duration=30)
+ def process_image(_model, _tokenizer, image):
+     if image is None:
+         return "Please upload an image"
+
+     try:
+         if not isinstance(image, Image.Image):
+             image = Image.fromarray(image)
+
+         instruction = "You are an expert radiographer. Describe accurately what you see in this image."
+         messages = [
+             {"role": "user", "content": [
+                 {"type": "image"},
+                 {"type": "text", "text": instruction}
+             ]}
+         ]
+
+         input_text = _tokenizer.apply_chat_template(messages, add_generation_prompt=True)
+         inputs = _tokenizer(
+             image,
+             input_text,
+             add_special_tokens=False,
+             return_tensors="pt",
+         ).to("cuda")
+
+         text_streamer = TextStreamer(_tokenizer, skip_prompt=True)
+         outputs = _model.generate(
+             **inputs,
+             streamer=text_streamer,
+             max_new_tokens=128,
+             use_cache=True,
+             temperature=1.5,
+             min_p=0.1
+         )
+
+         return _tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
+
+     except Exception as e:
+         return f"Error: {str(e)}"
+
+ print("Starting the application...")
+ if init_models():
+     demo = gr.Interface(
+         fn=process_image,
+         inputs=gr.Image(type="pil"),
+         outputs=gr.Textbox(),
+         title="Medical Vision Analysis"
+     )
+
+     if __name__ == "__main__":
+         demo.launch()
+ else:
+     print("Unable to start the application")