Research-EAI committed (verified)
Commit 6f419c3 · 1 Parent(s): ec58b95

Update README.md

Files changed (1):
  1. README.md +10 -10
README.md CHANGED
@@ -414,7 +414,7 @@ Domain and content type classification probabilities:
  
  ## How to Load the Dataset
  
- This section provides examples of how to load the `Research-EAI/eai-taxonomy-math-w-fm` dataset using different Python libraries and frameworks.
+ This section provides examples of how to load the `Research-EAI/essential-web-1t-sample-fdc-partitioned` dataset using different Python libraries and frameworks.
  
  ### Using Hugging Face Datasets (Standard Method)
  
@@ -424,7 +424,7 @@ The simplest way to load the dataset is using the Hugging Face `datasets` library
  from datasets import load_dataset
  
  # Load the entire dataset
- dataset = load_dataset("Research-EAI/eai-taxonomy-math-w-fm")
+ dataset = load_dataset("EssentialAI/essential-web-1t-sample-fdc-partitioned")
  
  # View dataset structure
  print(dataset)
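A quick way to verify the renamed repo id resolves after this change (a minimal sketch, not part of the commit; the split names come from whatever the Hub exposes):

```python
from datasets import load_dataset

dataset = load_dataset("EssentialAI/essential-web-1t-sample-fdc-partitioned")

# Print row counts and column names per split without dumping whole records
for split, ds in dataset.items():
    print(split, ds.num_rows, list(ds.features))
```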
@@ -437,7 +437,7 @@ You can also load the dataset in streaming mode to avoid downloading the entire
  from datasets import load_dataset
  
  # Load in streaming mode
- dataset = load_dataset("Research-EAI/eai-taxonomy-math-w-fm", streaming=True)
+ dataset = load_dataset("EssentialAI/essential-web-1t-sample-fdc-partitioned", streaming=True)
  data_stream = dataset["train"]
  
  # Iterate through examples
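The `# Iterate through examples` comment is cut off at the hunk boundary; one way to finish it without exhausting the stream (a sketch assuming the `train` split shown in the context line above):

```python
from itertools import islice
from datasets import load_dataset

dataset = load_dataset("EssentialAI/essential-web-1t-sample-fdc-partitioned", streaming=True)
data_stream = dataset["train"]

# islice stops after a few records, so only those are downloaded
for example in islice(data_stream, 3):
    print(example)
```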
@@ -457,10 +457,10 @@ import pyspark_huggingface
  from pyspark.sql import SparkSession
  
  # Initialize Spark session
- spark = SparkSession.builder.appName("EAI-Taxonomy-Math").getOrCreate()
+ spark = SparkSession.builder.appName("EAI-Taxonomy-Web-1T-Sample-FDC-Partitioned").getOrCreate()
  
  # Load the dataset using the "huggingface" data source
- df = spark.read.format("huggingface").load("Research-EAI/eai-taxonomy-math-w-fm")
+ df = spark.read.format("huggingface").load("EssentialAI/essential-web-1t-sample-fdc-partitioned")
  
  # Basic dataset exploration
  print(f"Dataset shape: {df.count()} rows, {len(df.columns)} columns")
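Beyond the row/column counts above, a small follow-up sketch using only standard PySpark calls, with no dataset-specific column names assumed:

```python
import pyspark_huggingface  # registers the "huggingface" data source
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("EAI-Taxonomy-Web-1T-Sample-FDC-Partitioned").getOrCreate()
df = spark.read.format("huggingface").load("EssentialAI/essential-web-1t-sample-fdc-partitioned")

# Peek at a few rows; truncate wide text columns to keep output readable
df.show(5, truncate=80)
```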
@@ -471,14 +471,14 @@ df.printSchema()
  df_subset = (
      spark.read.format("huggingface")
      .option("columns", '["column1", "column2"]')  # Replace with actual column names
-     .load("Research-EAI/eai-taxonomy-math-w-fm")
+     .load("EssentialAI/essential-web-1t-sample-fdc-partitioned")
  )
  
  # Run SQL queries on the dataset
- df.createOrReplaceTempView("eai_math_dataset")
+ df.createOrReplaceTempView("eai_web_1t_sample_fdc_partitioned_dataset")
  result = spark.sql("""
      SELECT COUNT(*) as total_examples
-     FROM eai_math_dataset
+     FROM eai_web_1t_sample_fdc_partitioned_dataset
  """)
  result.show()
  ```
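The renamed temp view supports more than a bare COUNT(*); a sketch of a grouped query, where `column1` is the same placeholder the README itself uses, not a real column name:

```python
# Assumes `spark` and the temp view registered in the snippet above
top_values = spark.sql("""
    SELECT column1, COUNT(*) AS n
    FROM eai_web_1t_sample_fdc_partitioned_dataset
    GROUP BY column1
    ORDER BY n DESC
    LIMIT 10
""")
top_values.show()
```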
@@ -491,7 +491,7 @@ Daft provides a modern DataFrame library optimized for machine learning workloads
  import daft
  
  # Load the entire dataset
- df = daft.read_parquet("hf://datasets/Research-EAI/eai-taxonomy-math-w-fm")
+ df = daft.read_parquet("hf://datasets/EssentialAI/essential-web-1t-sample-fdc-partitioned")
  
  # Basic exploration
  print("Dataset schema:")
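Daft evaluates lazily, so exploration only materializes what it displays; a minimal sketch against the new path:

```python
import daft

df = daft.read_parquet("hf://datasets/EssentialAI/essential-web-1t-sample-fdc-partitioned")

# schema() reads metadata only; show() materializes just the printed rows
print(df.schema())
df.show(5)
```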
@@ -512,7 +512,7 @@ os.environ["HF_TOKEN"] = "your_huggingface_token_here"
  
  # Load with authentication
  df = daft.read_parquet(
-     "hf://datasets/Research-EAI/eai-taxonomy-math-w-fm",
+     "hf://datasets/EssentialAI/essential-web-1t-sample-fdc-partitioned",
      hf_token=os.environ["HF_TOKEN"]
  )
  ```
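Hard-coding a token as in the snippet above is fine for a demo; for shared code, reading it from the environment is safer. A sketch reusing the same `hf_token` keyword the README shows (that keyword is taken on trust from the snippet above):

```python
import os
import daft

# Expect the token to be exported beforehand, e.g. `export HF_TOKEN=hf_...`
token = os.environ.get("HF_TOKEN")
if not token:
    raise RuntimeError("Set HF_TOKEN before loading a gated dataset")

df = daft.read_parquet(
    "hf://datasets/EssentialAI/essential-web-1t-sample-fdc-partitioned",
    hf_token=token,  # keyword as used in the README snippet above
)
```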
 