elliesleightholm committed (verified)
Commit 36197d8 · 1 Parent(s): e3f8b6e

Update README.md

Files changed (1):
  1. README.md +27 -26
README.md CHANGED
@@ -23,6 +23,33 @@ They are over 30% better compared to Amazon Titan Embedding services for e-comme
  | Marqo-Ecommerce-B | 203 | 768 | Marqo/marqo-ecommerce-embeddings-B | [link](https://marqo-gcl-public.s3.us-west-2.amazonaws.com/marqo-general-ecomm/marqo-ecomm-embeddings-b.pt) |
  | Marqo-Ecommerce-L | 652 | 1024 | Marqo/marqo-ecommerce-embeddings-L | [link](https://marqo-gcl-public.s3.us-west-2.amazonaws.com/marqo-general-ecomm/marqo-ecomm-embeddings-l.pt) |
 
+ ### HuggingFace with transformers
+ ```python
+ from transformers import AutoModel, AutoProcessor
+ import torch
+ from PIL import Image
+ import requests
+ # model_name= 'Marqo/marqo-ecommerce-embeddings-L'
+ model_name = 'Marqo/marqo-ecommerce-embeddings-B'
+
+ model = AutoModel.from_pretrained(model_name, trust_remote_code=True)
+ processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
+
+ img = Image.open(requests.get('https://raw.githubusercontent.com/marqo-ai/marqo-FashionCLIP/main/docs/fashion-hippo.png', stream=True).raw).convert("RGB")
+ image = [img]
+ text = ["a hat", "a t-shirt", "shoes"]
+ processed = processor(text=text, images=image, padding='max_length', return_tensors="pt")
+ processor.image_processor.do_rescale = False
+ with torch.no_grad():
+     image_features = model.get_image_features(processed['pixel_values'], normalize=True)
+     text_features = model.get_text_features(processed['input_ids'], normalize=True)
+
+ text_probs = (100 * image_features @ text_features.T).softmax(dim=-1)
+
+ print(text_probs)
+ # [9.9955e-01, 4.4712e-04, 4.4010e-06]]
+ ```
+
  ### HuggingFace with OpenCLIP
  ```
  pip install open_clip_torch
@@ -56,32 +83,6 @@ with torch.no_grad(), torch.cuda.amp.autocast():
  print("Label probs:", text_probs)
  # [9.9955e-01, 4.4712e-04, 4.4010e-06]]
  ```
- ### HuggingFace with transformers
- ```python
- from transformers import AutoModel, AutoProcessor
- import torch
- from PIL import Image
- import requests
- # model_name= 'Marqo/marqo-ecommerce-embeddings-L'
- model_name = 'Marqo/marqo-ecommerce-embeddings-B'
-
- model_1 = AutoModel.from_pretrained(model_name, trust_remote_code=True)
- processor_1 = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
-
- img = Image.open(requests.get('https://raw.githubusercontent.com/marqo-ai/marqo-FashionCLIP/main/docs/fashion-hippo.png', stream=True).raw).convert("RGB")
- image_1 = [img]
- text_1 = ["a hat", "a t-shirt", "shoes"]
- processed_1 = processor_1(text=text_1, images=image_1, padding='max_length', return_tensors="pt")
- processor_1.image_processor.do_rescale = False
- with torch.no_grad():
-     image_features_1 = model_1.get_image_features(processed_1['pixel_values'], normalize=True)
-     text_features_1 = model_1.get_text_features(processed_1['input_ids'], normalize=True)
-
- text_probs_1 = (100 * image_features_1 @ text_features_1.T).softmax(dim=-1)
-
- print(text_probs_1)
- # [9.9955e-01, 4.4712e-04, 4.4010e-06]]
- ```
 
  ### Evaluation with GCL
  ```
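The transformers example above requests `normalize=True` from both feature calls, so the returned image and text vectors are unit length and a plain dot product is already a cosine similarity. Below is a minimal sketch of how those outputs can be consumed for zero-shot ranking, reusing the same model and processor objects as the snippet; the `rank_captions` helper and its `top_k` parameter are hypothetical names introduced here, not part of the README.

```python
import torch

# Hypothetical helper (illustration only): rank candidate captions against
# one PIL image using the same calls as the README's transformers snippet.
def rank_captions(model, processor, pil_image, captions, top_k=3):
    processed = processor(text=captions, images=[pil_image],
                          padding='max_length', return_tensors="pt")
    with torch.no_grad():
        # normalize=True yields unit-length embeddings, so the matrix
        # product below computes cosine similarities directly.
        image_features = model.get_image_features(processed['pixel_values'], normalize=True)
        text_features = model.get_text_features(processed['input_ids'], normalize=True)
    scores = (image_features @ text_features.T).squeeze(0)  # shape: (len(captions),)
    top = scores.topk(min(top_k, len(captions)))
    return [(captions[i], scores[i].item()) for i in top.indices.tolist()]

# Example usage, assuming `model`, `processor`, and `img` were created as in
# the snippet above:
# print(rank_captions(model, processor, img, ["a hat", "a t-shirt", "shoes"]))
```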
 
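The diff shows the OpenCLIP example only at its edges: `pip install open_clip_torch` at the top and the `Label probs:` print at the bottom, with the middle elided as unchanged context. For orientation, here is a minimal sketch of the standard open_clip hub-loading pattern such a snippet conventionally follows; the model id comes from the table above, and the elided README lines may differ in detail.

```python
import open_clip
import requests
import torch
from PIL import Image

# Assumed pattern (not copied from the elided README lines): load the
# checkpoint directly from the Hugging Face Hub via open_clip's hf-hub support.
model, _, preprocess = open_clip.create_model_and_transforms(
    'hf-hub:Marqo/marqo-ecommerce-embeddings-B')
tokenizer = open_clip.get_tokenizer('hf-hub:Marqo/marqo-ecommerce-embeddings-B')

img = Image.open(requests.get(
    'https://raw.githubusercontent.com/marqo-ai/marqo-FashionCLIP/main/docs/fashion-hippo.png',
    stream=True).raw)
image = preprocess(img).unsqueeze(0)            # (1, 3, H, W) tensor
text = tokenizer(["a hat", "a t-shirt", "shoes"])

# The visible context lines of the README use no_grad plus autocast.
with torch.no_grad(), torch.cuda.amp.autocast():
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    # Normalize so the scaled dot product behaves as a cosine similarity.
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)

print("Label probs:", text_probs)
```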