# cannabis_results / algorithms / get_results_ca_flower_co.py
# Repository metadata: keeganskeate, latest-2024-08-11 (#6), commit d1ae506 (verified).
"""
Get California Cannabis Lab Results | Flower Company
Copyright (c) 2023-2024 Cannlytics
Authors:
Keegan Skeate <https://github.com/keeganskeate>
Candace O'Sullivan-Sutherland <https://github.com/candy-o>
Created: 12/8/2023
Updated: 5/21/2024
License: <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>
Description:
Archive cannabis lab result data published by the Flower Company.
Data Source:
* [Flower Company](https://flowercompany.com/)
Data points:
βœ“ product_id (generated)
βœ“ producer
βœ“ product_name
βœ“ product_url
βœ“ total_thc
βœ“ total_thc_units
βœ“ total_cbd
βœ“ total_cbd_units
βœ“ price
βœ“ discount_price
βœ“ amount
βœ“ classification
βœ“ indica_percentage
βœ“ sativa_percentage
βœ“ image_url
βœ“ product_type
βœ“ product_subtype
βœ“ product_description
βœ“ predicted_effects
βœ“ predicted_aromas
βœ“ lineage
βœ“ distributor
βœ“ distributor_license_number
βœ“ lab_results_url
βœ“ results (augmented)
"""
# Standard imports:
from datetime import datetime
import os
from time import sleep
# External imports:
from cannlytics.data import create_sample_id
from cannlytics.data.cache import Bogart
from cannlytics.data.coas.coas import CoADoc
from cannlytics.data.web import initialize_selenium
import pandas as pd
import requests
# Selenium imports.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
# Define the base URL of the Flower Company storefront.
base_url = 'https://flowercompany.com/'
# Brand pages, discovered at runtime from the menu page in
# `get_products_flower_co` and appended to this module-level list.
brand_pages = []
# Define the category pages to scrape for product cards.
category_pages = [
    'category/fire-flower',
    'category/cartridges',
    'category/concentrates',
    'category/edibles',
    'category/prerolls',
    'category/top-shelf-nugs',
    'category/just-weed',
    'category/wellness',
    'category/the-freshest',
    'category/staff-picks',
    'category/latest-drops',
]
# Define the indica/sativa types: map a strain classification to its
# indica fraction (the sativa fraction is 1 minus this value; unknown
# classifications default to 0.5 where this mapping is used).
indica_percentages = {
    'Indica': 1,
    'I-Hybrid': 0.75,
    'Hybrid': 0.5,
    'S-Hybrid': 0.25,
    'Sativa': 0,
}
def click_yes_button(driver):
    """Dismiss the age gate by clicking its "Yes" button, if present.

    Args:
        driver: A Selenium WebDriver on a Flower Company page.

    The age gate only appears on some visits, so a missing button is
    expected and silently ignored.
    """
    try:
        yes_button = driver.find_element(By.CLASS_NAME, 'age-gate-yes-button')
        yes_button.click()
        sleep(2)  # Allow the gate overlay to close.
    except Exception:
        # No age gate present; nothing to do. Avoid a bare `except` so
        # KeyboardInterrupt/SystemExit still propagate.
        pass
def click_show_more_button(driver):
    """Click "Show More" repeatedly until the button is no longer found.

    Args:
        driver: A Selenium WebDriver on a Flower Company listing page.

    Loads the full product list by expanding the page until the button
    disappears (its absence raises, which ends the loop).
    """
    while True:
        try:
            more_button = driver.find_element(By.CLASS_NAME, 'show-more-button')
            more_button.click()
            sleep(3)  # Wait for the next batch of products to render.
        except Exception:
            # Narrowed from a bare `except` so KeyboardInterrupt/SystemExit
            # can still interrupt the scrape.
            break
def save_product_data(
        items: list[dict],
        data_dir: str,
        namespace: str = 'results'
    ):
    """Save product data to a date-stamped CSV file.

    Args:
        items: A list of product dictionaries to save.
        data_dir: The directory for the datafile (created if missing).
        namespace: A prefix for the datafile name.

    Returns:
        The path of the saved CSV file, named `{namespace}-{YYYY-MM-DD}.csv`.
    """
    # `exist_ok=True` avoids the check-then-create race of the prior
    # `os.path.exists` guard.
    os.makedirs(data_dir, exist_ok=True)
    timestamp = datetime.now().strftime('%Y-%m-%d')
    datafile = os.path.join(data_dir, f'{namespace}-{timestamp}.csv')
    pd.DataFrame(items).to_csv(datafile, index=False)
    return datafile
def download_coa_pdfs(
        items,
        pdf_dir,
        cache=None,
        url_key='lab_results_url',
        id_key='product_id',
        verbose=True,
        pause=10.0
    ):
    """Download all of the COA PDFs.

    Args:
        items: A list of product dicts with lab result URLs.
        pdf_dir: The directory where PDFs are saved as `{product_id}.pdf`.
        cache: An optional `Bogart` cache used to skip already-downloaded URLs.
        url_key: The item key holding the lab results URL.
        id_key: The item key holding the product ID (used as the filename).
        verbose: Whether to print progress.
        pause: Seconds to sleep between downloads (politeness delay).
    """
    if not cache: cache = Bogart()
    for obs in items:
        url = obs[url_key]
        if not url:
            continue
        # Skip URLs that were already downloaded in a prior run.
        url_hash = cache.hash_url(url)
        if cache.get(url_hash):
            if verbose:
                print(f'Skipped (cached): {url}')
            continue
        # Use a timeout so a dead server cannot hang the run, and fail
        # on HTTP errors so error pages are not saved as PDFs.
        response = requests.get(url, timeout=60)
        response.raise_for_status()
        filename = os.path.join(pdf_dir, obs[id_key] + '.pdf')
        with open(filename, 'wb') as pdf_file:
            pdf_file.write(response.content)
        if verbose:
            # Fix: this previously printed the literal text '(unknown)'.
            print(f'Downloaded PDF: {filename}')
        cache.set(url_hash, {'type': 'download', 'url': url, 'file': filename})
        sleep(pause)
def parse_coa_pdfs(
        parser,
        data,
        pdf_dir,
        cache=None,
        id_key='product_id',
        verbose=True,
    ):
    """Parse corresponding COAs from a DataFrame in a PDF directory.

    Args:
        parser: A `CoADoc` parser instance.
        data: A DataFrame of products; each row's `id_key` names its PDF.
        pdf_dir: The directory containing `{product_id}.pdf` files.
        cache: An optional `Bogart` cache keyed by PDF file hash.
        id_key: The column holding the product ID.
        verbose: Whether to print progress.

    Returns:
        A DataFrame of product rows merged with their parsed COA data.
    """
    all_results = []
    if not cache: cache = Bogart()
    for _, row in data.iterrows():
        coa_pdf = row[id_key] + '.pdf'
        pdf_file_path = os.path.join(pdf_dir, coa_pdf)
        if not os.path.exists(pdf_file_path):
            continue
        # Reuse a previously parsed entry when the file hash is cached.
        # (Single lookup; previously `cache.get` was called twice.)
        pdf_hash = cache.hash_file(pdf_file_path)
        cached_entry = cache.get(pdf_hash)
        if cached_entry:
            if verbose:
                print(f'Skipped (cached parse): {pdf_file_path}')
            all_results.append(cached_entry)
            continue
        try:
            # Merge the product row with the first parsed COA sample.
            coa_data = parser.parse(pdf_file_path)
            entry = {**row.to_dict(), **coa_data[0]}
            entry['coa_pdf'] = coa_pdf
            all_results.append(entry)
            cache.set(pdf_hash, entry)
            if verbose:
                print(f'Parsed COA: {pdf_file_path}')
        except Exception as e:
            # Best-effort: skip unparseable COAs and keep going.
            if verbose:
                print(f'Failed to parse COA: {pdf_file_path}', str(e))
            continue
    return pd.DataFrame(all_results)
def extract_weight(amount_str: str):
    """Extract the numerical weight in grams from an amount string.

    Args:
        amount_str: A string like 'Eighth (3.5g)'.

    Returns:
        The grams as a float, or None when the string is empty, has no
        parenthesized weight, or the weight is not numeric (e.g. '(1/8 oz)',
        which previously raised ValueError).
    """
    if not amount_str:
        return None
    parts = amount_str.split('(')
    if len(parts) < 2:
        return None
    weight = parts[1].split('g')[0].strip()
    try:
        return float(weight)
    except ValueError:
        return None
def price_to_float(price_str: str):
    """Convert a price string to a float.

    Args:
        price_str: A price such as '$10.00' or '$1,000.50'.

    Returns:
        The numeric value; dollar signs, thousands separators, and
        surrounding whitespace are stripped (commas previously raised
        ValueError).
    """
    return float(price_str.replace('$', '').replace(',', '').strip())
def get_products_flower_co(
        data_dir: str,
        cache = None,
        verbose: bool = True,
        headless: bool = True,
        pause_between_page: float = 30.0,
    ):
    """Get products from Flower Company.

    Scrapes every category and discovered brand page for product cards,
    then visits each new product page to collect details (type,
    description, effects, aromas, lineage, distributor, lab results URL).

    Args:
        data_dir: Directory containing the running product archive CSV
            (`ca-all-products-flower-company.csv`), used to skip
            already-recorded products.
        cache: An optional `Bogart` cache of recorded product URLs.
        verbose: Whether to print progress.
        headless: Whether to run the browser headless.
        pause_between_page: Seconds to wait after loading each page.

    Returns:
        A list of product dictionaries.
    """
    # Use a fresh cache when none is provided.
    # Fix: the default `cache=None` previously crashed at `cache.get`.
    if cache is None:
        cache = Bogart()

    # Initialize the driver.
    driver = initialize_selenium(headless=headless)

    # Get all of the brand pages from the menu.
    driver.get(base_url + 'menu')
    click_yes_button(driver)
    div = driver.find_element(By.CLASS_NAME, 'special-content-brand-row')
    links = div.find_elements(by=By.TAG_NAME, value='a')
    for link in links:
        brand_pages.append(link.get_attribute('href').replace(base_url, ''))

    # Open each brand/category page.
    products, recorded = [], set(cache.get('product_urls') or [])
    for page in category_pages + brand_pages:

        # Get the brand/category page.
        driver.get(base_url + page)

        # Click "Yes" button.
        click_yes_button(driver)

        # Click "Show More" until the button is not found.
        click_show_more_button(driver)

        # Get all of the cards.
        sleep(pause_between_page)
        cards = driver.find_elements(by=By.CLASS_NAME, value='product-card-wrapper')
        if verbose:
            print(f'Found {len(cards)} products for page: {page}')

        # Get the data from each card.
        for card in cards:

            # Find the product details.
            producer = card.find_element(By.CSS_SELECTOR, '.favorite-company a').text.strip()
            product_name = card.find_element(By.CSS_SELECTOR, '.favorite-product-name a').text.strip()
            product_url = card.find_element(By.CSS_SELECTOR, '.favorite-product-name a').get_attribute('href')

            # Skip the product if it's already recorded.
            if product_url in recorded:
                continue
            recorded.add(product_url)

            # Get the total THC.
            # Optional: Get other totals.
            try:
                total_thc = card.find_element(By.CSS_SELECTOR, '.product-card-thc').text.strip()
            except Exception:
                total_thc = ''

            # Find the price and discount.
            discount = 0
            discount_price = card.find_element(By.CSS_SELECTOR, '.price.product-card-price-actual').text.strip()
            price = card.find_element(By.CSS_SELECTOR, '.price.retail.product-card-price-retail').text.strip()

            # Find the amount: a single variant or the first select option.
            try:
                amount = card.find_element(By.CSS_SELECTOR, '.solo-variant-toggle').text.strip()
            except Exception:
                select_element = card.find_element(By.CSS_SELECTOR, 'select.new-product-card-variant-select')
                select_object = Select(select_element)
                amount_options = [option.text.strip() for option in select_object.options]
                amount = amount_options[0] if amount_options else None

            # Find the strain type.
            classification = card.text.split('\n')[0]
            indica_percentage = indica_percentages.get(classification, 0.5)
            sativa_percentage = 1 - indica_percentage

            # Clean the data, keeping the raw strings when parsing fails.
            total_thc_units = 'percent' if '%' in total_thc else 'mg'
            try:
                total_thc = float(total_thc.lower().replace('% thc', '').replace('mg thc', '').strip())
                price = price_to_float(price)
                discount_price = price_to_float(discount_price)
                discount = price - discount_price
            except Exception:
                pass

            # Add the product to the list.
            products.append({
                'product_name': product_name,
                'category': page.split('/')[-1],
                'producer': producer,
                'total_thc': total_thc,
                'total_thc_units': total_thc_units,
                'price': price,
                'discount_price': discount_price,
                'discount': discount,
                'amount': extract_weight(amount),
                'classification': classification,
                'indica_percentage': indica_percentage,
                'sativa_percentage': sativa_percentage,
                'product_url': product_url,
            })

    # Cache the product URLs.
    cache.set('product_urls', list(recorded))

    # Determine which products are new by comparing against the archive
    # of all saved products, matched on URL and total THC.
    new_products = pd.DataFrame(products)
    products_datafile = os.path.join(data_dir, 'ca-all-products-flower-company.csv')
    if os.path.exists(products_datafile) and not new_products.empty:
        existing_products = pd.read_csv(products_datafile)
        if verbose:
            print('Number of existing products:', len(existing_products))
        new_products['total_thc'] = pd.to_numeric(new_products['total_thc'], errors='coerce')
        new_products['total_thc'] = new_products['total_thc'].fillna(0)
        existing_products['total_thc'] = pd.to_numeric(existing_products['total_thc'], errors='coerce')
        existing_combo = existing_products[['product_url', 'total_thc']]
        merged_df = pd.merge(new_products, existing_combo, on=['product_url', 'total_thc'], how='left', indicator=True)
        # Left-only rows are products not yet in the archive. Dropping the
        # indicator on a fresh frame avoids the in-place-on-a-slice warning.
        unrecorded_products = merged_df[merged_df['_merge'] == 'left_only'].drop(columns=['_merge'])
    else:
        # No archive yet (or nothing scraped): everything is unrecorded.
        # Previously an empty scrape crashed on the missing `total_thc` column.
        unrecorded_products = new_products

    # Get each product URL page to get each product's data and results.
    data = []
    if verbose:
        print('Number of unrecorded products:', len(unrecorded_products))
    for _, product in unrecorded_products.iterrows():
        if verbose:
            print(f'Getting data for: {product["product_url"]}')
        driver.get(product['product_url'])
        sleep(pause_between_page)

        # Click "Yes" button.
        click_yes_button(driver)

        # Get the product type and subtype.
        # Fix: both were previously unbound when no type elements existed.
        product_type, product_subtype = None, None
        types = driver.find_elements(By.CSS_SELECTOR, '.detail-product-type')
        if types:
            product_type = types[0].text.strip()
            if len(types) >= 2:
                product_subtype = types[1].text.strip()

        # Get the product description.
        try:
            product_description = driver.find_element(By.CSS_SELECTOR, '.product-view-description').text.strip()
        except Exception:
            product_description = None

        # Skip accessories.
        if product_type == 'Accessory':
            continue

        # Get the effects, aromas, lineage, and lab results URL.
        info_rows = driver.find_elements(By.CSS_SELECTOR, '.row.product-view-row')
        contents, effects, aromas, lineage, lab_results_url = '', '', '', '', ''
        for row in info_rows:
            parts = row.text.split('\n')
            field = parts[0].lower()
            if 'contents' in field:
                contents = parts[-1]
            elif 'effects' in field:
                effects = parts[-1]
            elif 'aromas' in field:
                aromas = parts[-1]
            elif 'lineage' in field:
                lineage = parts[-1]
            elif 'tested' in field:
                try:
                    el = row.find_element(By.TAG_NAME, 'a')
                    lab_results_url = el.get_attribute('href')
                except Exception:
                    pass

        # Get the distributor.
        # NOTE(review): assumes the last two sub-text elements are the
        # distributor name and license number — confirm against the page.
        els = driver.find_elements(By.CSS_SELECTOR, '.row.d-block .detail-sub-text')
        distributor = els[-2].text.strip() if len(els) > 1 else None
        distributor_license_number = els[-1].text.strip() if len(els) > 1 else None

        # Get the image URL.
        image_url = driver.find_element(By.CSS_SELECTOR, '.product-image-lg').get_attribute('src')

        # Get product name and producer, if missing.
        if not product['product_name']:
            product['product_name'] = driver.find_element(By.CSS_SELECTOR, '.product-view-name').text
            product['producer'] = driver.find_element(By.CSS_SELECTOR, '.product-view-brand').text

        # Get prices and amounts, if missing.
        if not product['price']:
            price_element = driver.find_element(By.ID, 'variant-price-retail')
            driver.execute_script("arguments[0].scrollIntoView(true);", price_element)
            sleep(0.33)  # Allow the scroll to settle before reading text.
            price = price_element.text
            discount_price = driver.find_element(By.ID, 'variant-price').text
            amount = driver.find_element(By.CSS_SELECTOR, '.variant-toggle').text
            product['amount'] = extract_weight(amount)
            product['price'] = price_to_float(price)
            product['discount_price'] = price_to_float(discount_price)
            product['discount'] = product['price'] - product['discount_price']

        # Get compounds, if missing.
        if not product.get('total_thc'):
            try:
                total_thc = driver.find_element(By.CSS_SELECTOR, '.product-card-thc').text
                product['total_thc'] = float(total_thc.lower().replace('% thc', '').replace('mg thc', '').strip())
                product['total_thc_units'] = 'percent' if '%' in total_thc else 'mg'
            except Exception:
                pass
        if not product.get('total_cbd'):
            try:
                total_cbd = driver.find_element(By.CSS_SELECTOR, '.product-card-cbd').text
                product['total_cbd'] = float(total_cbd.lower().replace('% cbd', '').replace('mg cbd', '').strip())
                product['total_cbd_units'] = 'percent' if '%' in total_cbd else 'mg'
            except Exception:
                product['total_cbd'] = None

        # Get classification, if missing.
        if not product['classification']:
            el = driver.find_element(By.CSS_SELECTOR, '.product-detail-type-container')
            product['classification'] = el.text.split('\n')[0]
            product['indica_percentage'] = indica_percentages.get(product['classification'], 0.5)
            # Fix: previously computed from a stale loop variable left over
            # from the card-scraping loop above.
            product['sativa_percentage'] = 1 - product['indica_percentage']

        # Create a product ID.
        product_id = create_sample_id(
            private_key=str(product['total_thc']),
            public_key=product['product_name'],
            salt=product['producer'],
        )

        # Record the product item details.
        item = {
            'product_id': product_id,
            'lab_results_url': lab_results_url,
            'image_url': image_url,
            'product_type': product_type,
            # Note: `product_subtype` may be getting over-ridden.
            # Deprecate `product_sub_type` once confirmed.
            'product_subtype': product_subtype,
            'product_sub_type': product_subtype,
            'product_description': product_description,
            'product_contents': contents,
            'predicted_effects': effects,
            'predicted_aromas': aromas.split(', '),
            'lineage': lineage,
            'distributor': distributor,
            'distributor_license_number': distributor_license_number,
        }
        data.append({**product, **item})

    # Close the browser.
    driver.close()
    driver.quit()

    # Return the data.
    return data
def get_results_ca_flower_co(
        pdf_dir,
        data_dir,
        cache_path=None,
        verbose=True,
        namespace='ca-products-flower-company',
    ):
    """Get California cannabis lab results from the Flower Company.

    Scrapes product data, saves it to a dated CSV, records the datafile
    in the cache, and downloads each product's COA PDF.

    Args:
        pdf_dir: Directory where COA PDFs are saved (created if missing).
        data_dir: Directory where datafiles are saved (created if missing).
        cache_path: Optional path for the `Bogart` cache file.
        verbose: Whether to print progress.
        namespace: Prefix for the saved product datafile.

    Returns:
        The list of scraped product dictionaries.
    """
    # Make sure the output directories exist.
    for directory in (pdf_dir, data_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)

    # Scrape the product data, using the cache to skip recorded products.
    cache = Bogart(cache_path)
    products = get_products_flower_co(data_dir, cache=cache, verbose=verbose)

    # Save the products and record the datafile in the cache.
    datafile = save_product_data(products, data_dir, namespace=namespace)
    datafile_hash = cache.hash_file(datafile)
    cache.set(datafile_hash, {'type': 'datafile', 'file': datafile})
    if verbose:
        print(f'Saved {len(products)} products to: {datafile}')

    # Download each product's COA PDF.
    download_coa_pdfs(products, pdf_dir=pdf_dir, cache=cache, verbose=verbose)
    return products
# TODO: Turn the following into standalone functions.
def parse_coas_ca_flower_co():
    """Parse COAs from the Flower Company.

    NOTE(review): Not yet implemented. A draft implementation (product
    aggregation, parsing with `CoADoc`, and saving results) is kept in
    the commented-out code below, pending a fix for a reported memory
    leak during parsing.
    """
    pass
# # Aggregate product URLs that have been recorded.
# existing_products = []
# url_files = [x for x in os.listdir(data_dir) if 'products' in x and 'all' not in x]
# for url_file in url_files:
# product_df = pd.read_csv(os.path.join(data_dir, url_file))
# existing_products.append(product_df)
# existing_products = pd.concat(existing_products)
# existing_products.drop_duplicates(subset=['product_url', 'total_thc'], inplace=True)
# print('Final number of products:', len(existing_products))
# products_datafile = os.path.join(data_dir, f'ca-all-products-flower-company.csv')
# existing_products.to_csv(products_datafile, index=False)
# # Read the download product items.
# product_data = pd.read_csv(datafile)
# # Parse any un-parsed COAs.
# # FIXME: For some reason this is causing a memory leak.
# TODO: Ensure the PDF can be matched to the data.
# parser = CoADoc()
# results = parse_coa_pdfs(
# parser=parser,
# data=product_data,
# cache=cache,
# pdf_dir=pdf_dir,
# verbose=verbose,
# )
# # Save the parsed COA data to a file.
# # TODO: Keep track of the datafile in the cache.
# namespace = 'ca-results-flower-company'
# timestamp = datetime.now().strftime('%Y-%m-%d')
# results_datafile = os.path.join(data_dir, f'{namespace}-{timestamp}.xlsx')
# parser.save(results, results_datafile)
# print(f'Saved {len(results)} parsed COAs to: {results_datafile}')
# Save all lab results.
# all_results = []
# results_files = [x for x in os.listdir(data_dir) if 'results' in x and 'all' not in x]
# for results_file in results_files:
# results_df = pd.read_excel(os.path.join(data_dir, results_file))
# all_results.append(results_df)
# all_results = pd.concat(all_results)
# all_results.drop_duplicates(subset=['sample_id', 'results_hash'], inplace=True)
# # all_results = all_results.loc[all_results['results'] != '[]']
# print('Final number of results:', len(all_results))
# all_results_datafile = os.path.join(data_dir, f'ca-all-results-flower-company.xlsx')
# all_results.to_excel(all_results_datafile, index=False)
# print(f'Saved {len(all_results)} results to: {all_results_datafile}')
def archive_results_ca_flower_co():
    """Archive the results from the Flower Company.

    NOTE(review): Not yet implemented. Intended to upload the data to
    Firestore and the files/datafiles to Google Cloud Storage (see the
    FIXME comments below).
    """
    pass
# # FIXME: Upload data to Firestore.
# # FIXME: Upload files to Google Cloud Storage.
# # FIXME: Upload datafiles to Google Cloud Storage.
# === Test ===
# [βœ“] Tested: 2024-05-21 by Keegan Skeate <keegan@cannlytics>
if __name__ == '__main__':

    # Scrape products, save the datafile, and download COA PDFs.
    all_results = get_results_ca_flower_co(
        pdf_dir='D:/data/california/results/pdfs/flower-company',
        data_dir='D:/data/california/results/datasets/flower-company',
        cache_path='D://data/.cache/results-ca-flower-co.jsonl',
        verbose=True,
    )
    # TODO: Parse COAs.
    # TODO: Archive results.