import re
from collections import Counter

import pandas as pd
from datasketch import MinHash, MinHashLSH
from lingua import Language, LanguageDetectorBuilder

# Initialize variables for statistics
word_count = Counter()
longest_sentence = ""
shortest_sentence = None
total_sentences = 0
all_sentences = []


def tokenize(text):
    """
    Clean and split text into words.
    """
    # Extract lowercase word tokens; \w is Unicode-aware, so Arabic script is matched too
    words = re.findall(r'\b\w+\b', text.lower())
    return words


# Open the file and process line by line
with open('tunisian_data.txt', 'r', encoding='utf-8') as file:
    for line in file:
        # Strip leading/trailing whitespace
        line = line.strip()

        # Skip empty lines
        if not line:
            continue

        # Split the line into sentences, using '.', '!', or '?' as delimiters
        # (the Arabic question mark '؟' could also be added for this corpus)
        sentences = re.split(r'[.!?]', line)

        for sentence in sentences:
            sentence = sentence.strip()
            if sentence:
                all_sentences.append(sentence)
                total_sentences += 1

                # Update longest and shortest sentences
                if len(sentence) > len(longest_sentence):
                    longest_sentence = sentence
                if shortest_sentence is None or len(sentence) < len(shortest_sentence):
                    shortest_sentence = sentence

                # Tokenize and count words
                words = tokenize(sentence)
                word_count.update(words)

# Report corpus statistics
print(f"Total sentences: {total_sentences}")
print(f"Longest sentence: {longest_sentence!r}")
print(f"Shortest sentence: {shortest_sentence!r}")

# Get the most common words
most_common_words = word_count.most_common(10)
print(f"Most Common Words: {most_common_words}")
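
# --- Near-duplicate removal with MinHash LSH ---
# Each sentence is reduced to a MinHash signature over its unique tokens; the
# LSH index then flags likely near-duplicates without comparing every pair.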


def get_minhash(text, num_perm=128):
    """
    Generate a MinHash for a given text.
    """
    tokens = set(text.split())
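    # Word-level shingles: the signature estimates Jaccard similarity over this
    # set of unique tokens, so word order is ignored.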
    m = MinHash(num_perm=num_perm)
    for token in tokens:
        m.update(token.encode('utf8'))
    return m


def minhash_deduplication(docs, threshold=0.8, num_perm=128):
    """
    Remove near-duplicate documents using MinHash LSH.
    """
    lsh = MinHashLSH(threshold=threshold, num_perm=num_perm)
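    # `threshold` is the (approximate) Jaccard similarity above which the LSH
    # index reports a match; matched sentences are treated as near-duplicates
    # and only the first occurrence is kept.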
    unique_docs = []

    for i, doc in enumerate(docs):
        m = get_minhash(doc, num_perm=num_perm)
        if not lsh.query(m):  # Check if the document is a near duplicate
            lsh.insert(i, m)
            unique_docs.append(doc)

    return unique_docs


unique_docs = minhash_deduplication(all_sentences, threshold=0.8)
print(f"Number of unique sentences after deduplication: {len(unique_docs)}")

# Language detection
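# Note: building the detector from Language.all() covers every language lingua
# supports; restricting it to the languages expected here (e.g. Language.ARABIC,
# Language.FRENCH, Language.ENGLISH) would make detection faster and lighter.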
detector = LanguageDetectorBuilder.from_languages(*Language.all()).build()
labels = []
cleaned_text = []

for s in unique_docs:
    lang = detector.detect_language_of(s)
    if lang is None:
        print(f"Could not detect language for sentence: {s}")
    else:
        labels.append(lang.name)
        cleaned_text.append(s)

# Create a DataFrame with the cleaned text and its detected language labels
df = pd.DataFrame({'text': cleaned_text, 'language': labels})
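
# The cleaned corpus can then be persisted or passed to downstream steps, e.g.:
# df.to_csv('tunisian_data_clean.csv', index=False)  # output path is illustrative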