wikisql_VALUE / wikisql_VALUE.py
"""A large crowd-sourced dataset for developing natural language interfaces for relational databases"""
import json
import os
import textwrap
import datasets
_CITATION = """\
@article{zhongSeq2SQL2017,
author = {Victor Zhong and
Caiming Xiong and
Richard Socher},
title = {Seq2SQL: Generating Structured Queries from Natural Language using
Reinforcement Learning},
journal = {CoRR},
volume = {abs/1709.00103},
year = {2017}
}
"""
_DESCRIPTION = """\
A large crowd-sourced dataset for developing natural language interfaces for relational databases
"""
_DATA_URL = "https://huggingface.co/datasets/SALT-NLP/wikisql_VALUE/resolve/main/data.zip"
_AGG_OPS = ["", "MAX", "MIN", "COUNT", "SUM", "AVG"]
_COND_OPS = ["=", ">", "<", "OP"]
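# Illustration (hypothetical logical form, not taken from the data): a query is
# encoded as indices into the tables above. With columns unknown (columns=None),
#   {"sel": 3, "agg": 5, "conds": [[0, 0, "South Australia"]]}
# uses _AGG_OPS[5] == "AVG" and _COND_OPS[0] == "=", so _convert_to_human_readable
# below would render it as:
#   SELECT AVG col3 FROM table WHERE col0 = South Australia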
class WikiSQLConfig(datasets.BuilderConfig):
"""BuilderConfig for WikiSQL."""
def __init__(
self,
name,
description,
train_path,
dev_path,
test_path,
**kwargs
):
super(WikiSQLConfig, self).__init__(version=datasets.Version("0.1.0", ""), **kwargs)
self.features = datasets.Features(
{
"phase": datasets.Value("int32"),
"question": datasets.Value("string"),
"table": {
"header": datasets.features.Sequence(datasets.Value("string")),
"page_title": datasets.Value("string"),
"page_id": datasets.Value("string"),
"types": datasets.features.Sequence(datasets.Value("string")),
"id": datasets.Value("string"),
"section_title": datasets.Value("string"),
"caption": datasets.Value("string"),
"rows": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
"name": datasets.Value("string"),
},
"sql": {
"human_readable": datasets.Value("string"),
"sel": datasets.Value("int32"),
"agg": datasets.Value("int32"),
"conds": datasets.features.Sequence(
{
"column_index": datasets.Value("int32"),
"operator_index": datasets.Value("int32"),
"condition": datasets.Value("string"),
}
),
},
}
)
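        # Shape sketch of one example as yielded by _generate_examples (values are
        # illustrative assumptions, not drawn from the dataset):
        # {
        #     "phase": 1,
        #     "question": "Who is the player from school X?",
        #     "table": {"header": ["Player", "School"], "rows": [["A. Name", "X"]], ...},
        #     "sql": {
        #         "human_readable": "SELECT Player FROM table WHERE School = X",
        #         "sel": 0,
        #         "agg": 0,
        #         "conds": [{"column_index": 1, "operator_index": 0, "condition": "X"}],
        #     },
        # }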
self.name = name
self.description = description
self.train_path = train_path
self.dev_path = dev_path
self.test_path = test_path
class WikiSQL(datasets.GeneratorBasedBuilder):
"""WikiSQL: A large crowd-sourced dataset for developing natural language interfaces for relational databases"""
VERSION = datasets.Version("0.1.0")
BUILDER_CONFIGS = [
WikiSQLConfig(
name="AppE",
description=textwrap.dedent(
"""\
An Appalachian English variant of the large crowd-sourced dataset for developing natural language interfaces for relational databases"""
),
train_path="train_AppE.jsonl",
dev_path="dev_AppE.jsonl",
test_path="test_AppE.jsonl"
),
WikiSQLConfig(
name="ChcE",
description=textwrap.dedent(
"""\
A Chicano English variant of the large crowd-sourced dataset for developing natural language interfaces for relational databases"""
),
train_path="train_ChcE.jsonl",
dev_path="dev_ChcE.jsonl",
test_path="test_ChcE.jsonl"
),
WikiSQLConfig(
name="CollSgE",
description=textwrap.dedent(
"""\
A Singapore English (Singlish) variant of the large crowd-sourced dataset for developing natural language interfaces for relational databases"""
),
train_path="train_CollSgE.jsonl",
dev_path="dev_CollSgE.jsonl",
test_path="test_CollSgE.jsonl"
),
WikiSQLConfig(
name="IndE",
description=textwrap.dedent(
"""\
An Indian English variant of the large crowd-sourced dataset for developing natural language interfaces for relational databases"""
),
train_path="train_IndE.jsonl",
dev_path="dev_IndE.jsonl",
test_path="test_IndE.jsonl"
),
WikiSQLConfig(
name="UAAVE",
description=textwrap.dedent(
"""\
An Urban African American English variant of the large crowd-sourced dataset for developing natural language interfaces for relational databases"""
),
train_path="train_UAAVE.jsonl",
dev_path="dev_UAAVE.jsonl",
test_path="test_UAAVE.jsonl"
),
WikiSQLConfig(
name="MULTI",
description=textwrap.dedent(
"""\
A mixed-dialectal variant of the large crowd-sourced dataset for developing natural language interfaces for relational databases"""
),
train_path="train_MULTI.jsonl",
dev_path="dev_MULTI.jsonl",
test_path="test_MULTI.jsonl"
),
]
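    # Usage sketch (an assumption based on the standard `datasets` API, not part of
    # the original script): each config is selected by name, and all six dialect
    # variants can be loaded in one pass, e.g.
    #   for cfg in ["AppE", "ChcE", "CollSgE", "IndE", "UAAVE", "MULTI"]:
    #       datasets.load_dataset("SALT-NLP/wikisql_VALUE", cfg, trust_remote_code=True)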
def _info(self):
return datasets.DatasetInfo(
description=self.config.description,
features=self.config.features,
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/salesforce/WikiSQL",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_dir = dl_manager.download_and_extract(_DATA_URL)
dl_dir = os.path.join(dl_dir, "data")
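        # Each config pairs its own dialect-specific question files (*_<name>.jsonl)
        # with tables files shared across configs ({train,dev,test}.tables.jsonl),
        # since the dialect rewriting appears to affect questions rather than tables.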
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"main_filepath": os.path.join(dl_dir, self.config.test_path),
"tables_filepath": os.path.join(dl_dir, "test.tables.jsonl"),
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"main_filepath": os.path.join(dl_dir, self.config.dev_path),
"tables_filepath": os.path.join(dl_dir, "dev.tables.jsonl"),
},
),
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"main_filepath": os.path.join(dl_dir, self.config.train_path),
"tables_filepath": os.path.join(dl_dir, "train.tables.jsonl"),
},
)
]
def _convert_to_human_readable(self, sel, agg, columns, conditions):
"""Make SQL query string. Based on https://github.com/salesforce/WikiSQL/blob/c2ed4f9b22db1cc2721805d53e6e76e07e2ccbdc/lib/query.py#L10"""
rep = f"SELECT {_AGG_OPS[agg]} {columns[sel] if columns is not None else f'col{sel}'} FROM table"
if conditions:
rep += " WHERE " + " AND ".join([f"{columns[i]} {_COND_OPS[o]} {v}" for i, o, v in conditions])
return " ".join(rep.split())
def _generate_examples(self, main_filepath, tables_filepath):
"""Yields examples."""
        # Build a dictionary mapping table id -> table
with open(tables_filepath, encoding="utf-8") as f:
tables = [json.loads(line) for line in f]
id_to_tables = {x["id"]: x for x in tables}
with open(main_filepath, encoding="utf-8") as f:
for idx, line in enumerate(f):
row = json.loads(line)
row["table"] = id_to_tables[row["table_id"]]
del row["table_id"]
# Handle missing data
row["table"]["page_title"] = row["table"].get("page_title", "")
row["table"]["section_title"] = row["table"].get("section_title", "")
row["table"]["caption"] = row["table"].get("caption", "")
row["table"]["name"] = row["table"].get("name", "")
row["table"]["page_id"] = str(row["table"].get("page_id", ""))
                # Cast all row cells to str (cells may be numeric in the raw data)
row["table"]["rows"] = [[str(e) for e in r] for r in row["table"]["rows"]]
# Get human-readable version
row["sql"]["human_readable"] = self._convert_to_human_readable(
row["sql"]["sel"],
row["sql"]["agg"],
row["table"]["header"],
row["sql"]["conds"],
)
                # Restructure sql->conds:
                # WikiSQL provides each condition as a triple [column_index, operator_index, condition].
                # Since 'condition' can be either a float or a str, we convert each triple to a dict
                # and cast the condition to str.
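                # e.g. [1, 0, "42"] -> {"column_index": 1, "operator_index": 0, "condition": "42"}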
for i in range(len(row["sql"]["conds"])):
row["sql"]["conds"][i] = {
"column_index": row["sql"]["conds"][i][0],
"operator_index": row["sql"]["conds"][i][1],
"condition": str(row["sql"]["conds"][i][2]),
}
yield idx, row
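
# Minimal usage sketch (an assumption based on the standard Hugging Face
# `datasets` API, not part of the original script; recent `datasets` versions
# require trust_remote_code=True to run script-based loaders):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("SALT-NLP/wikisql_VALUE", "AppE", trust_remote_code=True)
#   example = ds["train"][0]
#   print(example["question"])
#   print(example["sql"]["human_readable"])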