"""
Get California Cannabis Lab Results | Flower Company
Copyright (c) 2023-2024 Cannlytics

Authors:
    Keegan Skeate <https://github.com/keeganskeate>
    Candace O'Sullivan-Sutherland <https://github.com/candy-o>
Created: 12/8/2023
Updated: 5/21/2024
License: <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>

Description:

    Archive cannabis lab result data published by the Flower Company.

Data Source:

    * [Flower Company](https://flowercompany.com/)

Data points:

    ✓ product_id (generated)
    ✓ producer
    ✓ product_name
    ✓ product_url
    ✓ total_thc
    ✓ total_thc_units
    ✓ total_cbd
    ✓ total_cbd_units
    ✓ price
    ✓ discount_price
    ✓ amount
    ✓ classification
    ✓ indica_percentage
    ✓ sativa_percentage
    ✓ image_url
    ✓ product_type
    ✓ product_subtype
    ✓ product_description
    ✓ predicted_effects
    ✓ predicted_aromas
    ✓ lineage
    ✓ distributor
    ✓ distributor_license_number
    ✓ lab_results_url
    ✓ results (augmented)

"""
# Standard imports:
from datetime import datetime
import os
from time import sleep

# External imports:
from cannlytics.data import create_sample_id
from cannlytics.data.cache import Bogart
from cannlytics.data.coas.coas import CoADoc
from cannlytics.data.web import initialize_selenium
import pandas as pd
import requests

# Selenium imports.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select


# Define the base URL.
base_url = 'https://flowercompany.com/'

# Define the brand and category pages. The brand pages are discovered at
# runtime from the menu page; the category pages are fixed.
brand_pages = []
category_pages = [
    'category/fire-flower',
    'category/cartridges',
    'category/concentrates',
    'category/edibles',
    'category/prerolls',
    'category/top-shelf-nugs',
    'category/just-weed',
    'category/wellness',
    'category/the-freshest',
    'category/staff-picks',
    'category/latest-drops',
]

# Define the indica/sativa types.
indica_percentages = {
    'Indica': 1,
    'I-Hybrid': 0.75,
    'Hybrid': 0.5,
    'S-Hybrid': 0.25,
    'Sativa': 0,
}
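# For example, an 'I-Hybrid' product is treated as 0.75 indica / 0.25 sativa;
# unknown classifications default to 0.5 / 0.5 when looked up below.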


def click_yes_button(driver):
    """Click the "Yes" button."""
    try:
        yes_button = driver.find_element(By.CLASS_NAME, 'age-gate-yes-button')
        yes_button.click()
        sleep(2)
    except:
        pass


def click_show_more_button(driver):
    """Click "Show More" until the button is not found."""
    while True:
        try:
            # Find the "Show More" button and click it
            more_button = driver.find_element(By.CLASS_NAME, 'show-more-button')
            more_button.click()
            sleep(3)
        except:
            break


def save_product_data(
        items: list[dict],
        data_dir: str,
        namespace: str = 'results'
    ):
    """Save the product data to a CSV file."""
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)
    timestamp = datetime.now().strftime('%Y-%m-%d')
    datafile = os.path.join(data_dir, f'{namespace}-{timestamp}.csv')
    df = pd.DataFrame(items)
    df.to_csv(datafile, index=False)
    return datafile


def download_coa_pdfs(
        items,
        pdf_dir,
        cache=None,
        url_key='lab_results_url',
        id_key='product_id',
        verbose=True,
        pause=10.0
    ):
    """Download all of the COA PDFs."""
    if not cache: cache = Bogart()
    for obs in items:
        url = obs[url_key]
        if not url:
            continue
        url_hash = cache.hash_url(url)
        if cache.get(url_hash):
            if verbose:
                print(f'Skipped (cached): {url}')
            continue
        response = requests.get(url)
        filename = os.path.join(pdf_dir, obs[id_key] + '.pdf')
        with open(filename, 'wb') as pdf_file:
            pdf_file.write(response.content)
            if verbose:
                print(f'Downloaded PDF: {filename}')
        cache.set(url_hash, {'type': 'download', 'url': url, 'file': filename})
        sleep(pause)


def parse_coa_pdfs(
        parser,
        data,
        pdf_dir,
        cache=None,
        id_key='product_id',
        verbose=True,
    ):
    """Parse corresponding COAs from a DataFrame in a PDF directory."""
    all_results = []
    if not cache: cache = Bogart()
    for _, row in data.iterrows():
        coa_pdf = row[id_key] + '.pdf'
        pdf_file_path = os.path.join(pdf_dir, coa_pdf)
        if not os.path.exists(pdf_file_path):
            continue
        pdf_hash = cache.hash_file(pdf_file_path)
        if cache.get(pdf_hash):
            if verbose:
                print(f'Skipped (cached parse): {pdf_file_path}')
            all_results.append(cache.get(pdf_hash))
            continue
        try:
            coa_data = parser.parse(pdf_file_path)
            entry = {**row.to_dict(), **coa_data[0]}
            entry['coa_pdf'] = coa_pdf
            all_results.append(entry)
            cache.set(pdf_hash, entry)
            if verbose:
                print(f'Parsed COA: {pdf_file_path}')
        except Exception as e:
            if verbose:
                print(f'Failed to parse COA: {pdf_file_path}', str(e))
            continue
    return pd.DataFrame(all_results)


def extract_weight(amount_str: str):
    """Extracts the numerical weight in grams from the amount string."""
    if amount_str:
        parts = amount_str.split('(')
        if len(parts) > 1:
            weight = parts[1].split('g')[0].strip()
            try:
                return float(weight)
            except ValueError:
                return None
    return None


def price_to_float(price_str: str):
    """Converts a price string to a float."""
    return float(price_str.replace('$', '').replace(',', ''))
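# Note: these helpers assume the site's formatting; for example, an amount
# label like '1/8 oz (3.5g)' yields 3.5 and a price like '$45.00' yields 45.0.
# The exact label text is an assumption based on the parsing above.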


def get_products_flower_co(
        data_dir: str,
        cache = None,
        verbose: bool = True,
        headless: bool = True,
        pause_between_page: float = 30.0,
    ):
    """Get products from Flower Company."""

    # Initialize the cache and the driver.
    if cache is None:
        cache = Bogart()
    driver = initialize_selenium(headless=headless)

    # Get all of the brand pages.
    driver.get(base_url + 'menu')
    click_yes_button(driver)
    div = driver.find_element(By.CLASS_NAME, 'special-content-brand-row')
    links = div.find_elements(by=By.TAG_NAME, value='a')
    for link in links:
        brand_pages.append(link.get_attribute('href').replace(base_url, ''))

    # Open each brand/category page.
    products, recorded = [], set(cache.get('product_urls') or [])
    for page in category_pages + brand_pages:

        # Get the brand/category page.
        driver.get(base_url + page)

        # Click "Yes" button.
        click_yes_button(driver)

        # Click "Show More" until the button is not found.
        click_show_more_button(driver)

        # Get all of the cards.
        sleep(pause_between_page)
        cards = driver.find_elements(by=By.CLASS_NAME, value='product-card-wrapper')
        if verbose:
            print(f'Found {len(cards)} products for page: {page}')

        # Get the data from each card.
        for card in cards:

            # Find the product details.
            producer = card.find_element(By.CSS_SELECTOR, '.favorite-company a').text.strip()
            product_name = card.find_element(By.CSS_SELECTOR, '.favorite-product-name a').text.strip()
            product_url = card.find_element(By.CSS_SELECTOR, '.favorite-product-name a').get_attribute('href')
            
            # Skip the product if it's already recorded.
            if product_url in recorded:
                continue
            recorded.add(product_url)

            # Get the total THC.
            # Optional: Get other totals.
            try:
                total_thc = card.find_element(By.CSS_SELECTOR, '.product-card-thc').text.strip()
            except:
                total_thc = ''

            # Find the price and discount.
            discount = 0
            discount_price = card.find_element(By.CSS_SELECTOR, '.price.product-card-price-actual').text.strip()
            price = card.find_element(By.CSS_SELECTOR, '.price.retail.product-card-price-retail').text.strip()

            # Find the amount.
            try:
                amount = card.find_element(By.CSS_SELECTOR, '.solo-variant-toggle').text.strip()
            except:
                select_element = card.find_element(By.CSS_SELECTOR, 'select.new-product-card-variant-select')
                select_object = Select(select_element)
                amount_options = [option.text.strip() for option in select_object.options]
                amount = amount_options[0] if amount_options else None

            # Find the strain type.
            classification = card.text.split('\n')[0]
            indica_percentage = indica_percentages.get(classification, 0.5)
            sativa_percentage = 1 - indica_percentage

            # Clean the data.
            try:
                total_thc_units = 'percent' if '%' in total_thc else 'mg'
                total_thc = float(total_thc.lower().replace('% thc', '').replace('mg thc', '').strip())
                price = price_to_float(price)
                discount_price = price_to_float(discount_price)
                discount = price - discount_price
            except:
                pass

            # Add the product to the list.
            products.append({
                'product_name': product_name,
                'category': page.split('/')[-1],
                'producer': producer,
                'total_thc': total_thc,
                'total_thc_units': total_thc_units,
                'price': price,
                'discount_price': discount_price,
                'discount': discount,
                'amount': extract_weight(amount),
                'classification': classification,
                'indica_percentage': indica_percentage,
                'sativa_percentage': sativa_percentage,
                'product_url': product_url,
            })

    # Cache the product URLs.
    cache.set('product_urls', list(recorded))

    # Open file of all saved product URLs.
    products_datafile = os.path.join(data_dir, 'ca-all-products-flower-company.csv')
    if os.path.exists(products_datafile):
        existing_products = pd.read_csv(products_datafile)
        if verbose:
            print('Number of existing products:', len(existing_products))
        new_products = pd.DataFrame(products)
        new_products['total_thc'] = pd.to_numeric(new_products['total_thc'], errors='coerce')
        new_products['total_thc'] = new_products['total_thc'].fillna(0)
        existing_products['total_thc'] = pd.to_numeric(existing_products['total_thc'], errors='coerce')
        existing_combo = existing_products[['product_url', 'total_thc']]
        merged_df = pd.merge(new_products, existing_combo, on=['product_url', 'total_thc'], how='left', indicator=True)
        unrecorded_products = merged_df[merged_df['_merge'] == 'left_only'].drop(columns=['_merge'])
    else:
        unrecorded_products = pd.DataFrame(products)

    # Get each product URL page to get each product's data and results.
    data = []
    if verbose:
        print('Number of unrecorded products:', len(unrecorded_products))
    for _, product in unrecorded_products.iterrows():
        if verbose:
            print(f'Getting data for: {product["product_url"]}')
        driver.get(product['product_url'])
        sleep(pause_between_page)

        # Click "Yes" button.
        click_yes_button(driver)

        # Get data for each product:
        types = driver.find_elements(By.CSS_SELECTOR, '.detail-product-type')
        product_type = types[0].text.strip() if types else None
        product_subtype = types[1].text.strip() if len(types) >= 2 else None

        # Get the product description.
        try:
            product_description = driver.find_element(By.CSS_SELECTOR, '.product-view-description').text.strip()
        except:
            product_description = None

        # Skip accessories.
        if product_type == 'Accessory':
            continue

        # Get the effects, aromas, lineage, and lab results URL.
        info_rows = driver.find_elements(By.CSS_SELECTOR, '.row.product-view-row')
        contents, effects, aromas, lineage, lab_results_url = '', '', '', '', ''
        for row in info_rows:
            parts = row.text.split('\n')
            field = parts[0].lower()
            if 'contents' in field:
                contents = parts[-1]
            elif 'effects' in field:
                effects = parts[-1]
            elif 'aromas' in field:
                aromas = parts[-1]
            elif 'lineage' in field:
                lineage = parts[-1]
            elif 'tested' in field:
                try:
                    el = row.find_element(By.TAG_NAME, 'a')
                    lab_results_url = el.get_attribute('href')
                except:
                    pass
        
        # Get the distributor.
        els = driver.find_elements(By.CSS_SELECTOR, '.row.d-block .detail-sub-text')
        distributor = els[-2].text.strip() if len(els) > 1 else None
        distributor_license_number = els[-1].text.strip() if len(els) > 1 else None
        
        # Get the image URL.
        image_url = driver.find_element(By.CSS_SELECTOR, '.product-image-lg').get_attribute('src')

        # Get product name and producer, if missing.
        if not product['product_name']:
            product['product_name'] = driver.find_element(By.CSS_SELECTOR, '.product-view-name').text
            product['producer'] = driver.find_element(By.CSS_SELECTOR, '.product-view-brand').text
        
        # Get prices and amounts, if missing.
        if not product['price']:
            price_element = driver.find_element(By.ID, 'variant-price-retail')
            driver.execute_script("arguments[0].scrollIntoView(true);", price_element)
            sleep(0.33)
            price = price_element.text
            discount_price = driver.find_element(By.ID, 'variant-price').text
            amount = driver.find_element(By.CSS_SELECTOR, '.variant-toggle').text
            product['amount'] = extract_weight(amount)
            product['price'] = price_to_float(price)
            product['discount_price'] = price_to_float(discount_price)
            product['discount'] = product['price'] - product['discount_price']

        # Get compounds, if missing.
        if not product.get('total_thc'):
            try:
                total_thc = driver.find_element(By.CSS_SELECTOR, '.product-card-thc').text
                product['total_thc'] = float(total_thc.lower().replace('% thc', '').replace('mg thc', '').strip())
                product['total_thc_units'] = 'percent' if '%' in total_thc else 'mg'
            except:
                pass
        if not product.get('total_cbd'):
            try:
                total_cbd = driver.find_element(By.CSS_SELECTOR, '.product-card-cbd').text
                product['total_cbd'] = float(total_cbd.lower().replace('% cbd', '').replace('mg cbd', '').strip())
                product['total_cbd_units'] = 'percent' if '%' in total_cbd else 'mg'
            except:
                product['total_cbd'] = None

        # Get classification, if missing.
        if not product['classification']:
            el = driver.find_element(By.CSS_SELECTOR, '.product-detail-type-container')
            product['classification'] = el.text.split('\n')[0]
            product['indica_percentage'] = indica_percentages.get(product['classification'], 0.5)
            product['sativa_percentage'] = 1 - product['indica_percentage']
        
        # Create a product ID.
        product_id = create_sample_id(
            private_key=str(product['total_thc']),
            public_key=product['product_name'],
            salt=product['producer'],
        )

        # Record the product item details.
        item = {
            'product_id': product_id,
            'lab_results_url': lab_results_url,
            'image_url': image_url,
            'product_type': product_type,
            # Note: `product_subtype` may be getting overridden.
            # Deprecate `product_sub_type` once confirmed.
            'product_subtype': product_subtype,
            'product_sub_type': product_subtype,
            'product_description': product_description,
            'product_contents': contents,
            'predicted_effects': effects,
            'predicted_aromas': aromas.split(', '),
            'lineage': lineage,
            'distributor': distributor,
            'distributor_license_number': distributor_license_number,
        }
        data.append({**product, **item})

    # Close the browser.
    driver.close()
    driver.quit()

    # Return the data.
    return data


def get_results_ca_flower_co(
        pdf_dir,
        data_dir,
        cache_path=None,
        verbose=True,
        namespace = 'ca-products-flower-company',
    ):
    """Get California cannabis lab results from the Flower Company."""
    if not os.path.exists(pdf_dir): os.makedirs(pdf_dir)
    if not os.path.exists(data_dir): os.makedirs(data_dir)
    cache = Bogart(cache_path)
    data = get_products_flower_co(data_dir, cache=cache, verbose=verbose)
    datafile = save_product_data(data, data_dir, namespace=namespace)
    cache.set(cache.hash_file(datafile), {'type': 'datafile', 'file': datafile})
    if verbose: print(f'Saved {len(data)} products to: {datafile}')
    download_coa_pdfs(data, pdf_dir=pdf_dir, cache=cache, verbose=verbose)
    return data


# TODO: Turn the following into standalone functions.

def parse_coas_ca_flower_co():
    """Parse COAs from the Flower Company (see the sketch after the
    commented-out workflow below for one possible implementation)."""
    pass

# # Aggregate product URLs that have been recorded.
# existing_products = []
# url_files = [x for x in os.listdir(data_dir) if 'products' in x and 'all' not in x]
# for url_file in url_files:
#     product_df = pd.read_csv(os.path.join(data_dir, url_file))
#     existing_products.append(product_df)
# existing_products = pd.concat(existing_products)
# existing_products.drop_duplicates(subset=['product_url', 'total_thc'], inplace=True)
# print('Final number of products:', len(existing_products))
# products_datafile = os.path.join(data_dir, f'ca-all-products-flower-company.csv')
# existing_products.to_csv(products_datafile, index=False)

# # Read the download product items.
# product_data = pd.read_csv(datafile)

# # Parse any un-parsed COAs.
# # FIXME: For some reason this is causing a memory leak.
# TODO: Ensure the PDF can be matched to the data.
# parser = CoADoc()
# results = parse_coa_pdfs(
#     parser=parser,
#     data=product_data,
#     cache=cache,
#     pdf_dir=pdf_dir,
#     verbose=verbose,
# )

# # Save the parsed COA data to a file.
# # TODO: Keep track of the datafile in the cache.
# namespace = 'ca-results-flower-company'
# timestamp = datetime.now().strftime('%Y-%m-%d')
# results_datafile = os.path.join(data_dir, f'{namespace}-{timestamp}.xlsx')
# parser.save(results, results_datafile)
# print(f'Saved {len(results)} parsed COAs to: {results_datafile}')

# Save all lab results.
# all_results = []
# results_files = [x for x in os.listdir(data_dir) if 'results' in x and 'all' not in x]
# for results_file in results_files:
#     results_df = pd.read_excel(os.path.join(data_dir, results_file))
#     all_results.append(results_df)
# all_results = pd.concat(all_results)
# all_results.drop_duplicates(subset=['sample_id', 'results_hash'], inplace=True)
# # all_results = all_results.loc[all_results['results'] != '[]']
# print('Final number of results:', len(all_results))
# all_results_datafile = os.path.join(data_dir, f'ca-all-results-flower-company.xlsx')
# all_results.to_excel(all_results_datafile, index=False)
# print(f'Saved {len(all_results)} results to: {all_results_datafile}')
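
# A minimal sketch of `parse_coas_ca_flower_co`, assuming the commented-out
# workflow above: read a previously saved product datafile, parse any
# un-parsed COA PDFs with `CoADoc` via `parse_coa_pdfs`, and save the results
# to an Excel workbook. The `datafile` argument and the output naming are
# assumptions rather than a confirmed interface.
def _parse_coas_ca_flower_co_sketch(
        datafile,
        pdf_dir,
        data_dir,
        cache_path=None,
        verbose=True,
        namespace='ca-results-flower-company',
    ):
    """Parse downloaded Flower Company COAs for a saved product datafile."""
    cache = Bogart(cache_path)
    product_data = pd.read_csv(datafile)
    parser = CoADoc()
    results = parse_coa_pdfs(
        parser=parser,
        data=product_data,
        cache=cache,
        pdf_dir=pdf_dir,
        verbose=verbose,
    )
    timestamp = datetime.now().strftime('%Y-%m-%d')
    results_datafile = os.path.join(data_dir, f'{namespace}-{timestamp}.xlsx')
    parser.save(results, results_datafile)
    if verbose:
        print(f'Saved {len(results)} parsed COAs to: {results_datafile}')
    return results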


def archive_results_ca_flower_co():
    """Archive the results from the Flower Company."""
    pass

    # # FIXME: Upload data to Firestore.


    # # FIXME: Upload files to Google Cloud Storage.



    # # FIXME: Upload datafiles to Google Cloud Storage.
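
# A minimal sketch of the uploads flagged by the FIXMEs above, assuming the
# standard `google-cloud-firestore` and `google-cloud-storage` client
# libraries. The collection name, bucket name, and storage paths are
# hypothetical placeholders, not the project's confirmed data layout.
def _archive_results_ca_flower_co_sketch(
        results: list[dict],
        pdf_dir: str,
        bucket_name: str,
        collection: str = 'ca_flower_co_lab_results',
    ):
    """Upload parsed results to Firestore and COA PDFs to Cloud Storage."""
    from google.cloud import firestore, storage
    db = firestore.Client()
    bucket = storage.Client().bucket(bucket_name)
    for obs in results:
        doc_id = obs.get('product_id') or obs.get('sample_id')
        if not doc_id:
            continue
        # Upload the COA PDF, if one was downloaded for this product.
        pdf_file = os.path.join(pdf_dir, f'{doc_id}.pdf')
        if os.path.exists(pdf_file):
            blob = bucket.blob(f'{collection}/pdfs/{doc_id}.pdf')
            blob.upload_from_filename(pdf_file)
            obs['coa_pdf_ref'] = blob.name
        # Upload the result data to Firestore.
        db.collection(collection).document(doc_id).set(obs, merge=True)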


# === Test ===
# [✓] Tested: 2024-05-21 by Keegan Skeate <keegan@cannlytics>
if __name__ == '__main__':

    # Get results.
    all_results = get_results_ca_flower_co(
        pdf_dir='D:/data/california/results/pdfs/flower-company',
        data_dir='D:/data/california/results/datasets/flower-company',
        cache_path='D:/data/.cache/results-ca-flower-co.jsonl',
        verbose=True,
    )

    # Parse COAs.

    # Archive results.