Update README.md

## How to Load the Dataset

This section provides examples of how to load the `EssentialAI/eai-taxonomy-code-w-dclm-100b-sample` dataset using different Python libraries and frameworks.

### Using Hugging Face Datasets (Standard Method)

The simplest way to load the dataset is using the Hugging Face `datasets` library:

```python
from datasets import load_dataset

# Load the entire dataset
dataset = load_dataset("EssentialAI/eai-taxonomy-code-w-dclm-100b-sample")

# View dataset structure
print(dataset)
```
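
Once loaded, individual records can be inspected directly. A minimal sketch, assuming only the standard `datasets` API (the actual field names come from the dataset schema):

```python
from datasets import load_dataset

dataset = load_dataset("EssentialAI/eai-taxonomy-code-w-dclm-100b-sample")

# List the columns, then peek at the first record of the train split
print(dataset["train"].column_names)
print(dataset["train"][0])
```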

You can also load the dataset in streaming mode to avoid downloading the entire dataset:

```python
from datasets import load_dataset

# Load in streaming mode
dataset = load_dataset("EssentialAI/eai-taxonomy-code-w-dclm-100b-sample", streaming=True)
data_stream = dataset["train"]

# Iterate through examples
for example in data_stream:
    print(example)
    break  # Remove to iterate over the full stream
```
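
In streaming mode the returned object is an `IterableDataset`, so sampling helpers apply lazily. A minimal sketch, assuming only the standard `datasets` streaming API:

```python
from datasets import load_dataset

dataset = load_dataset("EssentialAI/eai-taxonomy-code-w-dclm-100b-sample", streaming=True)
data_stream = dataset["train"]

# Shuffle with a bounded buffer and take a small sample, all without
# downloading the full dataset
for example in data_stream.shuffle(seed=42, buffer_size=10_000).take(5):
    print(example)
```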

### Using PySpark

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("EAI-Taxonomy-Code-w-DCLM").getOrCreate()

# Load the dataset using the "huggingface" data source
df = spark.read.format("huggingface").load("EssentialAI/eai-taxonomy-code-w-dclm-100b-sample")

# Basic dataset exploration
print(f"Dataset shape: {df.count()} rows, {len(df.columns)} columns")
df.printSchema()

# Load only a subset of columns
df_subset = (
    spark.read.format("huggingface")
    .option("columns", '["column1", "column2"]')  # Replace with actual column names
    .load("EssentialAI/eai-taxonomy-code-w-dclm-100b-sample")
)

# Run SQL queries on the dataset
df.createOrReplaceTempView("dataset")
spark.sql("SELECT COUNT(*) AS num_rows FROM dataset").show()
```
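
Because the loaded object is an ordinary Spark DataFrame, standard transformations compose with it. A hedged sketch (`column1` is a placeholder, not a documented column of this dataset):

```python
from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.appName("EAI-Taxonomy-Code-w-DCLM").getOrCreate()
df = spark.read.format("huggingface").load("EssentialAI/eai-taxonomy-code-w-dclm-100b-sample")

# Show the ten most frequent values of a hypothetical column
df.groupBy("column1").count().orderBy(F.desc("count")).show(10)
```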

### Using Daft

Daft provides a modern DataFrame library optimized for machine learning workloads:

```python
import daft

# Load the entire dataset
df = daft.read_parquet("hf://datasets/EssentialAI/eai-taxonomy-code-w-dclm-100b-sample")

# Basic exploration
print("Dataset schema:")
print(df.schema())
```
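
Daft evaluates lazily, so a small preview can be materialized without scanning the whole dataset. A minimal sketch using only the core Daft API:

```python
import daft

df = daft.read_parquet("hf://datasets/EssentialAI/eai-taxonomy-code-w-dclm-100b-sample")

# Materialize only the first few rows into pandas for a quick look
preview = df.limit(5).to_pandas()
print(preview)
```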

For authenticated access (for example, to a gated or private dataset), pass a Hugging Face token through Daft's IO configuration:

```python
import daft
from daft.io import IOConfig, HTTPConfig

io_config = IOConfig(http=HTTPConfig(bearer_token="your_token"))
df = daft.read_parquet("hf://datasets/EssentialAI/eai-taxonomy-code-w-dclm-100b-sample", io_config=io_config)
```

### Installation Requirements
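
The examples above require the corresponding client libraries. A minimal, unpinned sketch; the `pyspark_huggingface` package, which provides Spark's `huggingface` data source, is an assumption here:

```bash
pip install datasets                      # Hugging Face Datasets examples
pip install pyspark pyspark_huggingface   # PySpark examples (connector package name assumed)
pip install daft                          # Daft examples
```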