Commit 13eeb4f
Parent(s): 488f10b

Updating to get the model imports to work

Files changed:
- app.py (+13, -13)
- requirements.txt (+0, -4)
app.py CHANGED

@@ -1,18 +1,19 @@
 import numpy as np
 from scipy import signal
-import
+import math
+# import huggingface_hub # for loading model
 import streamlit as st
 # from transformers import AutoModel
-from transformers import TFAutoModel
+# from transformers import TFAutoModel
 # Needed for importing torch to use in the transformers model
-import torch
-import tensorflow
+# import torch
+# import tensorflow
 import matplotlib.pyplot as plt
 # HELLO HUGGING FACE
 
 
 def basic_box_array(image_size):
-    A =
+    A = np.zeros((int(image_size), int(image_size)))  # Initializes A matrix with 0 values
     # Creates the outside edges of the box
     for i in range(image_size):
         for j in range(image_size):
@@ -22,7 +23,7 @@ def basic_box_array(image_size):
 
 
 def back_slash_array(image_size):
-    A =
+    A = np.zeros((int(image_size), int(image_size)))  # Initializes A matrix with 0 values
     for i in range(image_size):
         for j in range(image_size):
             if i == j:
@@ -31,7 +32,7 @@ def back_slash_array(image_size):
 
 
 def forward_slash_array(image_size):
-    A =
+    A = np.zeros((int(image_size), int(image_size)))  # Initializes A matrix with 0 values
     for i in range(image_size):
         for j in range(image_size):
             if i == (image_size - 1) - j:
@@ -41,7 +42,7 @@ def forward_slash_array(image_size):
 
 def hot_dog_array(image_size):
     # Places pixels down the vertical axis to split the box
-    A =
+    A = np.zeros((int(image_size), int(image_size)))  # Initializes A matrix with 0 values
     for i in range(image_size):
         for j in range(image_size):
             if j == math.floor((image_size - 1) / 2) or j == math.ceil((image_size - 1) / 2):
@@ -51,7 +52,7 @@ def hot_dog_array(image_size):
 
 def hamburger_array(image_size):
     # Places pixels across the horizontal axis to split the box
-    A =
+    A = np.zeros((int(image_size), int(image_size)))  # Initializes A matrix with 0 values
     for i in range(image_size):
         for j in range(image_size):
             if i == math.floor((image_size - 1) / 2) or i == math.ceil((image_size - 1) / 2):
@@ -60,7 +61,7 @@ def hamburger_array(image_size):
 
 
 def center_array(image_size):
-    A =
+    A = np.zeros((int(image_size), int(image_size)))  # Initializes A matrix with 0 values
     for i in range(image_size):
         for j in range(image_size):
             if i == math.floor((image_size - 1) / 2) and j == math.ceil((image_size - 1) / 2):
@@ -86,7 +87,7 @@ def update_array(array_original, array_new, image_size):
 def add_pixels(array_original, additional_pixels, image_size):
     # Adds pixels to the thickness of each component of the box
     A = array_original
-    A_updated =
+    A_updated = np.zeros((int(image_size), int(image_size)))  # Initializes A matrix with 0 values
     for dens in range(additional_pixels):
         for i in range(1, image_size - 1):
             for j in range(1, image_size - 1):
@@ -278,7 +279,6 @@ if st.button("Generate Endpoint Images"):
     plt.figure(1)
     st.pyplot(plt.figure(1))
 
-'''
 # Load the models from existing huggingface model
 # Load the encoder model
 # encoder_model_boxes = huggingface_hub.from_pretrained_keras("cmudrc/2d-lattice-encoder")
@@ -286,5 +286,5 @@ encoder_model = TFAutoModel.from_pretrained("cmudrc/2d-lattice-encoder")
 # Load the decoder model
 # decoder_model_boxes = huggingface_hub.from_pretrained_keras("cmudrc/2d-lattice-decoder")
 decoder_model = TFAutoModel.from_pretrained("cmudrc/2d-lattice-decoder")
-
+
 
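The commit's visible change to the pattern generators is that each one now starts from an np.zeros matrix. A rough, self-contained sketch of how one generator reads after the change, with the border-filling loop body assumed (it lies outside the diff context shown above):

import numpy as np


def basic_box_array(image_size):
    A = np.zeros((int(image_size), int(image_size)))  # Initializes A matrix with 0 values
    # Creates the outside edges of the box; the exact fill condition below is an
    # assumption, since the loop body falls outside the diff hunks
    for i in range(image_size):
        for j in range(image_size):
            if i == 0 or j == 0 or i == image_size - 1 or j == image_size - 1:
                A[i][j] = 1
    return A


print(basic_box_array(5))  # 5x5 array with a one-pixel border of 1s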
requirements.txt CHANGED

@@ -1,8 +1,4 @@
-huggingface_hub==0.12.0
 matplotlib==3.5.2
 numpy==1.21.5
 scipy==1.9.1
 streamlit==1.18.1
-tensorflow==2.10.0
-torch==2.0.0
-transformers==4.26.0
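The commented-out lines in app.py point at huggingface_hub.from_pretrained_keras, while the active lines still call TFAutoModel.from_pretrained even though the transformers import is commented out and transformers, tensorflow, torch, and huggingface_hub are dropped from requirements.txt in this commit. A minimal sketch of the keras loading path those comments reference, assuming huggingface_hub and tensorflow are put back into requirements.txt:

import huggingface_hub

# Repo ids taken from the commented-out lines in app.py
encoder_model_boxes = huggingface_hub.from_pretrained_keras("cmudrc/2d-lattice-encoder")
decoder_model_boxes = huggingface_hub.from_pretrained_keras("cmudrc/2d-lattice-decoder")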