Datasets:

Size: 10B<n<100B

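The script below streams the gzipped Wikidata dump (`latest-all.json.gz`) line by line, strips the trailing commas that separate entries in the dump's JSON array, decodes each entry with `msgspec`, and prints the IDs of entities whose ID starts with "M".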
```python
import gzip
import msgspec
import pathlib
import tqdm


def read_lines_jsonl(file_name):
    # Stream a gzipped JSONL file in 4 GiB chunks, yielding each stripped line
    # together with the current offset in the decompressed stream.
    with gzip.open(file_name, "rb") as file_handle:
        buffer = b""
        while True:
            chunk = file_handle.read((2**31) * 2)
            if not chunk:
                break
            lines = (buffer + chunk).split(b"\n")
            for line in lines[:-1]:
                yield line.strip(), file_handle.tell()
            buffer = lines[-1]


decoder = msgspec.json.Decoder()
with gzip.open(
    pathlib.Path("v2_SuperWikiFigures/wikidata/latest-all.json.gz"), "rb"
) as fp:
    for line in tqdm.tqdm(fp):
        # The dump is one large JSON array; skip the short "[" / "]" lines.
        if len(line) <= 2:
            continue
        # Entries are separated by a trailing comma (ASCII 44); strip it.
        if line[-2] == 44:
            line = line.rstrip(b",\n")
        wkdata: dict = decoder.decode(line)
        if wkdata["id"].startswith("M"):
            print(wkdata["id"])
```
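
Note that `read_lines_jsonl` is defined but never called; the main loop iterates the gzip file object directly. As a minimal sketch (my assumption about the intended use, not part of the original), the same filtering could be driven through that helper, with the yielded offsets feeding a `tqdm` counter of decompressed bytes:

```python
import pathlib

import msgspec
import tqdm

# Hypothetical alternative main loop using read_lines_jsonl from the snippet
# above. gzip's tell() reports positions in the *decompressed* stream, so the
# progress counter tracks decompressed bytes and has no fixed total.
dump_path = pathlib.Path("v2_SuperWikiFigures/wikidata/latest-all.json.gz")
decoder = msgspec.json.Decoder()
progress = tqdm.tqdm(unit="B", unit_scale=True)
last_offset = 0
for line, offset in read_lines_jsonl(dump_path):
    progress.update(offset - last_offset)
    last_offset = offset
    if len(line) <= 2:  # skip the "[" / "]" lines of the dump's JSON array
        continue
    wkdata = decoder.decode(line.rstrip(b","))  # drop the trailing array comma
    if wkdata["id"].startswith("M"):
        print(wkdata["id"])
```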

