'''
Add image-embedding columns to a Hugging Face image dataset by sending each image
to a Gradio /predict endpoint and saving the augmented dataset to disk.

Example invocations:

python ds_add_emb.py svjack/Prince_Xiang_iclight_v2 image --output_path Prince_Xiang_iclight_v2_emb

python ds_add_emb.py svjack/Prince_Xiang_PhotoMaker_V2_10 image1 image2 --output_path Prince_Xiang_PhotoMaker_V2_10_emb

python ds_add_emb.py svjack/Prince_Xiang_ConsistentID_SDXL_10 image --output_path Prince_Xiang_ConsistentID_SDXL_10_emb

python ds_add_emb.py svjack/Prince_Xiang_PhotoMaker_V2_1280 image1 image2 --output_path Prince_Xiang_PhotoMaker_V2_1280_emb

python ds_add_emb.py svjack/Prince_Xiang_ConsistentID_SDXL_1280 image --output_path Prince_Xiang_ConsistentID_SDXL_1280_emb

'''

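# The Gradio endpoint this script calls is not defined here. A minimal server that
# would satisfy client.predict(image=..., api_name="/predict") might look like the
# hypothetical sketch below (embed_fn and the 512-dim placeholder are assumptions;
# the only requirement process_images() relies on is that the endpoint returns a
# dict containing an 'embedding' key):
#
#     import gradio as gr
#
#     def embed_fn(image):
#         # Replace with a real embedding model; the image arrives as a PIL.Image.
#         return {"embedding": [0.0] * 512}
#
#     gr.Interface(fn=embed_fn, inputs=gr.Image(type="pil"), outputs=gr.JSON()).launch()
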
import argparse
from datasets import load_dataset
from gradio_client import Client, handle_file
import os
from uuid import uuid1

def process_images(repo_id, image_columns, gradio_url, output_path):
    # Load the dataset from the Hugging Face Hub
    dataset = load_dataset(repo_id, split='train')

    # Initialize the Gradio client
    client = Client(gradio_url)

    # Process each image column in turn
    for col in image_columns:
        print(f"Processing column: {col}")
        embeddings = []
        for idx, image in enumerate(dataset[col]):
            print(f"Processing image {idx+1}/{len(dataset[col])} in column {col}")
            # Write the PIL image to a temporary file so it can be uploaded
            name = "{}.png".format(uuid1())
            image.save(name)
            try:
                result = client.predict(
                    image=handle_file(name),
                    api_name="/predict"
                )
                # The endpoint is assumed to return a dict with an 'embedding' key
                embeddings.append(result['embedding'])
            except Exception as e:
                print(f"Error processing image {idx+1}/{len(dataset[col])} in column {col}: {e}")
                embeddings.append(None)
            finally:
                os.remove(name)

        # Add the embeddings as a new column alongside the original one
        dataset = dataset.add_column(f"{col}_embedding", embeddings)

    # Save the processed dataset to disk
    dataset.save_to_disk(output_path)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Process images in a Hugging Face dataset using a Gradio API.")
    parser.add_argument("repo_id", type=str, help="Hugging Face dataset repo ID")
    parser.add_argument("image_columns", type=str, nargs='+', help="List of image column names")
    parser.add_argument("--gradio_url", type=str, default="http://127.0.0.1:7860", help="Gradio API URL")
    parser.add_argument("--output_path", type=str, default="processed_dataset", help="Output path to save the processed dataset")

    args = parser.parse_args()

    process_images(args.repo_id, args.image_columns, args.gradio_url, args.output_path)
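
# The processed dataset is written with save_to_disk(), so it can be reloaded with
# datasets.load_from_disk(). For example (path taken from the first docstring
# invocation; "image_embedding" is the column added above for the "image" column):
#
#     from datasets import load_from_disk
#
#     ds = load_from_disk("Prince_Xiang_iclight_v2_emb")
#     print(ds.column_names)            # original columns plus "image_embedding"
#     print(ds["image_embedding"][0])   # embedding of the first image, or None on failure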