import argparse
import json
import logging
import os

from datasets import Dataset

# Configure logging for detailed output
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def load_questions_from_meta_qa(meta_qa_file):
    """Load the question list from meta_qa_en.txt, one question per non-empty line."""
    with open(meta_qa_file, "r") as f:
        questions = [line.strip() for line in f if line.strip()]
    return questions


def process_parquet_files(data_dir, output_jsonl, meta_qa_file=None, output_imgs=None, process_qa=False):
    """
    Process Parquet files to generate a JSONL file with optional image export and QA list creation.

    Args:
        data_dir (str): Directory containing Parquet files.
        output_jsonl (str): Output JSONL file path.
        meta_qa_file (str, optional): Path to the meta_qa_en.txt file for QA list creation.
        output_imgs (str, optional): Directory path to save images. If None, images are not saved.
        process_qa (bool): Whether to process and include QA pairs in the output.

    Returns:
        None
    """
    if output_imgs and not os.path.exists(output_imgs):
        os.makedirs(output_imgs)

    # Load questions only if QA processing is enabled
    questions = None
    if process_qa and meta_qa_file:
        questions = load_questions_from_meta_qa(meta_qa_file)

    jsonl_data = []
    parquet_files = [os.path.join(data_dir, f) for f in os.listdir(data_dir) if f.endswith(".parquet")]

    for parquet_file in parquet_files:
        dataset = Dataset.from_parquet(parquet_file)
        for row in dataset:
            json_item = {
                "internal_id": row["internal_id"],
                "url": row["url"],
                "annotation": row["annotation"],
                "meta_result": row["meta_result"],
                "meta_mask": row["meta_mask"],
            }

            # Optionally save images (raw image bytes as stored in the Parquet file)
            if output_imgs:
                img_data = row["image"]
                img_path = os.path.join(output_imgs, f"{row['internal_id']}.jpg")
                try:
                    with open(img_path, "wb") as img_file:
                        img_file.write(img_data)
                    json_item["image_path"] = img_path
                except Exception as e:
                    logger.error(f"Error saving image for internal_id {row['internal_id']}: {e}")

            # Optionally build QA pairs by pairing each masked-in question with its yes/no result
            if process_qa and questions:
                qa_list = []
                meta_result = row["meta_result"]
                meta_mask = row["meta_mask"]
                for idx, mask in enumerate(meta_mask):
                    if mask == 1:  # Process questions only if the mask is 1
                        question = questions[idx]
                        answer = 'yes' if meta_result[idx] == 1 else 'no'
                        qa_list.append({"question": question, "answer": answer})
                json_item["qa_list"] = qa_list

            jsonl_data.append(json_item)

    # Write one JSON object per line
    with open(output_jsonl, "w") as outfile:
        for json_item in jsonl_data:
            outfile.write(json.dumps(json_item) + "\n")

    logger.info(f"Finished writing JSONL file with {len(jsonl_data)} items.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Convert VisionReward Parquet dataset files to JSONL format with optional image extraction and QA list generation.")
    parser.add_argument("--data_dir", type=str, default='data', help="Directory containing Parquet files.")
    parser.add_argument("--output_jsonl", type=str, default='annotation.jsonl', help="Path to the output JSONL file.")
    parser.add_argument("--meta_qa_file", type=str, default="meta_qa_en.txt", help="Optional: Path to the meta_qa_en.txt file for QA list generation.")
    parser.add_argument("--save_imgs", action="store_true", help="Optional: Whether to save images.")
    parser.add_argument("--process_qa", action="store_true", help="Optional: Process and include QA pairs in the output.")
    args = parser.parse_args()

    output_imgs = 'imgs' if args.save_imgs else None
    process_parquet_files(
        data_dir=args.data_dir,
        output_jsonl=args.output_jsonl,
        meta_qa_file=args.meta_qa_file,
        output_imgs=output_imgs,
        process_qa=args.process_qa
    )
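# Example invocation (a sketch: the script file name "convert_parquet_to_jsonl.py" is
# assumed; the flags and their defaults match the argparse definitions above):
#
#   python convert_parquet_to_jsonl.py \
#       --data_dir data \
#       --output_jsonl annotation.jsonl \
#       --meta_qa_file meta_qa_en.txt \
#       --save_imgs \
#       --process_qa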