cwolff committed
Commit 2e6fb29 · 1 Parent(s): b4338b2

feat: Pushed new scripts

Files changed (2):
  1. convert_to_hf.py +0 -1
  2. push_to_hf.py +156 -0
convert_to_hf.py CHANGED
@@ -238,7 +238,6 @@ def main():
 
     with gzip.open("schemapile-perm.json.gz", "rt", encoding="utf-8") as f:
         data = json.loads(f.read())
-
     records = transform(data)
 
     # Emit
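For reference, the read pattern this hunk touches, shown as a standalone sketch: the input filename matches the hunk, while transform() is defined elsewhere in convert_to_hf.py and is stubbed here as a hypothetical placeholder.

    import gzip
    import json

    # Open the gzipped corpus in text mode and parse it in one step;
    # json.load(f) is equivalent to the hunk's json.loads(f.read()).
    with gzip.open("schemapile-perm.json.gz", "rt", encoding="utf-8") as f:
        data = json.load(f)

    def transform(data):
        # Hypothetical stub; the real transform() lives in convert_to_hf.py.
        return data if isinstance(data, list) else list(data.values())

    records = transform(data)
    print(f"Transformed {len(records)} records")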
push_to_hf.py ADDED
@@ -0,0 +1,156 @@
+#!/usr/bin/env python3
+"""
+Push a JSONL file into a Hugging Face Datasets repository.
+
+Usage:
+    python push_to_hf.py \
+        --jsonl /path/to/data.jsonl \
+        --repo-id username/my-dataset \
+        --split-name train \
+        --private \
+        --commit-message "Initial upload"
+
+Auth:
+    - Set an environment variable HF_TOKEN with a write-access token, or
+    - Pass --token YOUR_TOKEN
+
+Notes:
+    - Requires: datasets>=2.14.0, huggingface_hub>=0.23.0
+    - If the repo doesn't exist, it will be created.
+"""
+import argparse
+import os
+import sys
+from typing import List, Optional
+
+from datasets import load_dataset, Dataset, DatasetDict  # type: ignore
+from huggingface_hub import HfApi  # type: ignore
+
+
+def parse_args() -> argparse.Namespace:
+    p = argparse.ArgumentParser(description="Push JSONL to a Hugging Face dataset repo")
+    p.add_argument(
+        "--jsonl",
+        nargs="+",
+        required=True,
+        help="Path(s) to JSONL file(s). You can pass multiple to concatenate.",
+    )
+    p.add_argument(
+        "--repo-id",
+        required=True,
+        help="Target dataset repo like 'username/dataset_name'",
+    )
+    p.add_argument(
+        "--split-name",
+        default="train",
+        help="Dataset split name to assign (default: train)",
+    )
+    p.add_argument(
+        "--private",
+        action="store_true",
+        help="Create the dataset repo as private if it doesn't exist",
+    )
+    p.add_argument(
+        "--commit-message",
+        default="Add dataset",
+        help="Commit message for the upload",
+    )
+    p.add_argument(
+        "--token",
+        default=os.environ.get("HF_TOKEN"),
+        help="HF API token (defaults to HF_TOKEN env var)",
+    )
+    p.add_argument(
+        "--max-shard-size",
+        default="500MB",
+        help="Max shard size used by push_to_hub (e.g., '500MB', '1GB')",
+    )
+    return p.parse_args()
+
+
+def ensure_repo(repo_id: str, token: Optional[str], private: bool) -> None:
+    api = HfApi()
+    try:
+        api.create_repo(
+            repo_id=repo_id,
+            repo_type="dataset",
+            private=private,
+            exist_ok=True,
+            token=token,
+        )
+        print(f"✔️ Ensured dataset repo exists: {repo_id} (private={private})")
+    except Exception as e:
+        print(f"❌ Failed to ensure/create repo '{repo_id}': {e}")
+        sys.exit(1)
+
+
+def load_jsonl_as_dataset(files: List[str]) -> Dataset:
+    # datasets will auto-detect JSON Lines when using the 'json' builder
+    print(f"📦 Loading JSONL: {files}")
+    ds = load_dataset("json", data_files=files, split="train")  # type: ignore
+    # Basic sanity check
+    print(f"✅ Loaded {len(ds):,} rows with columns: {list(ds.features.keys())}")
+    return ds
+
+
+def push_dataset(
+    ds: Dataset,
+    repo_id: str,
+    split_name: str,
+    token: Optional[str],
+    commit_message: str,
+    max_shard_size: str,
+) -> None:
+    print(
+        f"🚀 Pushing split='{split_name}' to https://huggingface.co/datasets/{repo_id} ..."
+    )
+    # Push a single split; this will create a DatasetDict with that split on the Hub
+    ds.push_to_hub(
+        repo_id=repo_id,
+        split=split_name,
+        token=token,
+        commit_message=commit_message,
+        max_shard_size=max_shard_size,
+    )
+    print("🎉 Upload complete!")
+
+
+def main() -> None:
+    args = parse_args()
+
+    if not args.token:
+        print(
+            "❌ No token provided. Set HF_TOKEN env var or pass --token.\n"
+            "   Create a token at https://huggingface.co/settings/tokens"
+        )
+        sys.exit(2)
+
+    # Expand paths and verify they exist
+    files = []
+    for f in args.jsonl:
+        f = os.path.expanduser(f)
+        if not os.path.isfile(f):
+            print(f"❌ File not found: {f}")
+            sys.exit(2)
+        files.append(f)
+
+    ensure_repo(args.repo_id, args.token, args.private)
+
+    ds = load_jsonl_as_dataset(files)
+
+    # Optional: cast to string for problematic columns (commented; uncomment if needed)
+    # from datasets import Features, Value
+    # ds = ds.cast(Features({k: Value("string") for k in ds.features}))
+
+    push_dataset(
+        ds=ds,
+        repo_id=args.repo_id,
+        split_name=args.split_name,
+        token=args.token,
+        commit_message=args.commit_message,
+        max_shard_size=args.max_shard_size,
+    )
+
+
+if __name__ == "__main__":
+    main()
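Once the script has run, a quick sanity check is to load the split back from the Hub. A minimal sketch, assuming the placeholder repo id username/my-dataset from the usage example above and a token already exported as HF_TOKEN:

    import os
    from datasets import load_dataset

    # 'username/my-dataset' is the docstring's placeholder, not a real repo.
    ds = load_dataset(
        "username/my-dataset",
        split="train",
        token=os.environ.get("HF_TOKEN"),
    )
    print(ds)  # should report the same row count and columns logged during upload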