Python

def k8s_conf_dict(boot_conf, hb_conf):
    """
    Generates and returns a dict of the k8s deployment configuration
    :param boot_conf: the snaps-boot config dict
    :param hb_conf: the adrenaline config dict
    :return: dict with one key 'kubernetes' containing the rest of the data
    """
    k8s_dict = __generate_base_k8s_config(boot_conf, hb_conf)
    k8s_dict['kubernetes']['node_configuration'] = __generate_node_config(
        boot_conf, hb_conf)
    return k8s_dict

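A minimal usage sketch, assuming the two configuration dicts are read from YAML files; the file names and the output path are illustrative assumptions, not part of the module:

import yaml

# Hypothetical input/output file names, for illustration only.
with open('boot.yaml') as boot_file:
    boot_conf = yaml.safe_load(boot_file)      # snaps-boot config
with open('adrenaline.yaml') as hb_file:
    hb_conf = yaml.safe_load(hb_file)          # adrenaline config

k8s_conf = k8s_conf_dict(boot_conf, hb_conf)
# The returned dict has a single top-level 'kubernetes' key.
with open('k8s-deploy.yaml', 'w') as out_file:
    yaml.safe_dump(k8s_conf, out_file, default_flow_style=False)
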
Python

def __generate_base_k8s_config(boot_conf, hb_conf):
    """
    Generates a snaps-kubernetes configuration dict without any
    node_configuration
    :param boot_conf: the snaps-boot config dict
    :param hb_conf: the adrenaline config dict
    :return: a dict
    """
    out_conf = dict()
    out_conf.update(hb_conf)
    if 'master_ip' not in hb_conf:
        repo_node = hb_conf['docker']['repo_host']
        ip = get_node_ip(boot_conf, repo_node)
        out_conf['master_ip'] = ip
    if 'parent_intf' not in hb_conf:
        parent_intf = hb_conf['node_info']['macvlan_intf']
        out_conf['parent_intf'] = parent_intf
    if 'node_host_pass' not in hb_conf:
        repo_pass = hb_conf['docker']['repo_pass']
        out_conf['node_host_pass'] = repo_pass
    if 'minions' in hb_conf and isinstance(hb_conf['minions'], list):
        out_conf['hostname'] = hb_conf['minions'][0]
    if not out_conf.get('k8s_version'):
        out_conf['k8s_version'] = consts.DFLT_K8S_VERSION
    if not out_conf.get('kubespray_url'):
        out_conf['kubespray_url'] = consts.DFLT_KUBESPRAY_URL
    if not out_conf.get('kubespray_branch'):
        out_conf['kubespray_branch'] = consts.DFLT_KUBESPRAY_BRANCH
    if 'api_host' in hb_conf:
        out_conf['api_host'] = hb_conf['api_host']
    env = Environment(loader=FileSystemLoader(
        searchpath=os.path.dirname(consts.K8S_DEPLOY_TMPLT)))
    template = env.get_template(os.path.basename(consts.K8S_DEPLOY_TMPLT))
    env_str = template.render(**out_conf)
    out_dict = yaml.safe_load(StringIO(env_str))
    if hb_conf.get('Persistent_Volumes'):
        out_dict['kubernetes']['Persistent_Volumes'] = hb_conf.get(
            'Persistent_Volumes')
    if hb_conf.get('Networks'):
        out_dict['kubernetes']['Networks'] = hb_conf.get('Networks')
    if hb_conf.get('secrets'):
        out_dict['kubernetes']['secrets'] = hb_conf.get('secrets')
    if hb_conf.get('proxies'):
        out_dict['kubernetes']['proxies'] = hb_conf.get('proxies')
    if hb_conf.get('kubespray_proxies'):
        out_dict['kubernetes']['kubespray_proxies'] = hb_conf.get(
            'kubespray_proxies')
    if hb_conf.get('enable_kubevirt'):
        out_dict['enable_kubevirt'] = hb_conf['enable_kubevirt']
    if hb_conf.get('enable_ovs_dpdk'):
        out_dict['enable_ovs_dpdk'] = hb_conf['enable_ovs_dpdk']
    if hb_conf.get('enable_prometheus_grafana'):
        out_dict['enable_prometheus_grafana'] = hb_conf['enable_prometheus_grafana']
    if hb_conf.get('enable_dcgm'):
        out_dict['enable_dcgm'] = hb_conf['enable_dcgm']
    if hb_conf.get('enable_gpu_share'):
        out_dict['enable_gpu_share'] = hb_conf['enable_gpu_share']
    if hb_conf.get('enable_ceph_rook'):
        out_dict['enable_ceph_rook'] = hb_conf['enable_ceph_rook']
    if hb_conf.get('enable_edgefs_rook'):
        out_dict['enable_edgefs_rook'] = hb_conf['enable_edgefs_rook']
    return out_dict

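The function above relies on a render-then-parse pattern: the merged configuration is fed through a Jinja2 template and the rendered text is loaded back as YAML. Below is a minimal, self-contained sketch of that pattern using an inline template instead of consts.K8S_DEPLOY_TMPLT; the template body is an assumption for illustration only:

from io import StringIO

import yaml
from jinja2 import DictLoader, Environment

# Assumed, tiny stand-in for the real k8s deployment template.
template_body = ('kubernetes:\n'
                 '  api_host: {{ api_host }}\n'
                 '  k8s_version: {{ k8s_version }}\n')
env = Environment(loader=DictLoader({'k8s-deploy.yaml.j2': template_body}))
template = env.get_template('k8s-deploy.yaml.j2')
rendered = template.render(api_host='10.0.0.10', k8s_version='v1.18.3')
out_dict = yaml.safe_load(StringIO(rendered))
print(out_dict['kubernetes']['k8s_version'])  # -> v1.18.3
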
Python

def deploy(boot_conf, hb_conf, user, os_env_file=None, boot_timeout=1800):
    """
    Installs and sets up PXE bootable machines with an OS and network
    configuration
    :param boot_conf: boot configuration dict
    :param hb_conf: adrenaline configuration dict
    :param user: the sudo user used to apply the playbook
    :param os_env_file: when environment is on OpenStack, this file is required
    :param boot_timeout: number of seconds to wait for PXE booting to complete
    :raises: Exception should snaps-boot fail to execute successfully
    """
    # Install and setup Digital Rebar
    # add post_script file to boot_conf dict
    ps_file = pkg_resources.resource_filename(
        'snaps_adrenaline.deployment.boot', 'post_script')
    ovs_dpdk_enabled = hb_conf['enable_ovs_dpdk']
    if ovs_dpdk_enabled == 'true':
        logger.info('ovs-dpdk:true: ON the post-script ovs-dpdk-flag')
        for line in fileinput.input(ps_file, inplace=True):
            print(line.replace(
                'OVS_DPDK_FLAG="OFF"', 'OVS_DPDK_FLAG="ON"'), end='')
    if ovs_dpdk_enabled == 'false':
        logger.info('ovs-dpdk:false: OFF the post-script ovs-dpdk-flag')
        for line in fileinput.input(ps_file, inplace=True):
            print(line.replace(
                'OVS_DPDK_FLAG="ON"', 'OVS_DPDK_FLAG="OFF"'), end='')
    pxe_config = boot_conf['PROVISION']['TFTP']['pxe_server_configuration']
    pxe_config['ubuntu']['post_script_location'] = ps_file
    rebar_utils.install_config_drp(consts.REBAR_SESSION, boot_conf)
    # Reboot for pxelinux.0 download and installation
    if os_env_file:
        __reboot_openstack_nodes(os_env_file)
    else:
        if boot_conf['PROVISION'].get('HYPERVISOR'):
            __reboot_libvirt_nodes(boot_conf)
        else:
            ipmi_utils.reboot_pxe(boot_conf)
    __block_until_complete(boot_conf, boot_timeout, suspend=450)
    if not os_env_file:
        try:
            pxe_utils.static_ip_configure(boot_conf)
        except Exception as e:
            logger.warning('Unexpected exception configuring NICs; trying '
                           'once more in 60 seconds - [%s]', e)
            time.sleep(60)
            pxe_utils.static_ip_configure(boot_conf)
    else:
        # TODO - make the default MTU setting configurable for OpenStack
        __override_default_mtu(boot_conf)
    __setup_gpu(boot_conf, hb_conf, user)
    __setup_fpga(boot_conf, hb_conf, user)
    __setup_ovs_dpdk(boot_conf, hb_conf, user) 
    __post_hw_setup_reboot(boot_conf, hb_conf, user)

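The OVS-DPDK toggle in deploy() depends on fileinput's in-place mode, where everything printed inside the loop becomes the new file content. A small, self-contained sketch of that idiom; the post_script file here is a throw-away created just for the demonstration:

import fileinput

with open('post_script', 'w') as demo_file:
    demo_file.write('OVS_DPDK_FLAG="OFF"\n')

# With inplace=True, stdout is redirected into the file being read, so each
# printed line replaces the original one; end='' preserves the line endings.
for line in fileinput.input('post_script', inplace=True):
    print(line.replace('OVS_DPDK_FLAG="OFF"', 'OVS_DPDK_FLAG="ON"'), end='')

with open('post_script') as demo_file:
    print(demo_file.read())  # OVS_DPDK_FLAG="ON"
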
Python

def __setup_build_server(boot_conf):
    """
    This function is responsible for creating the DHCP server and starting the
    PXE boot process
    :param boot_conf: boot configuration dict
    """
    rebar_utils.install_config_drp(consts.REBAR_SESSION, boot_conf)

Python

def __override_default_mtu(boot_conf, mtu=1400):
    """
    Overrides the default MTU on the nodes via an Ansible playbook
    :param boot_conf: the snaps-boot configuration dict
    :param mtu: the MTU value for the nodes (default 1400)
    """
    logger.info('Setting default MTU')
    hosts = config_utils.get_node_ips_from_config(boot_conf)
    ansible_utils.apply_playbook(
        consts.OVERRIDE_DFLT_MTU_PB, hosts, 'root', variables={'mtu': mtu})
    logger.info('Completed MTU reconfiguration on k8s nodes')

Python

def __block_until_complete(boot_conf, timeout, suspend=0):
    """
    Function that blocks until all nodes have SSH ports opened
    :param boot_conf: boot configuration dict
    :param timeout: the maximum number of seconds to wait for the nodes'
                    SSH ports to open
    :param suspend: the number of seconds to wait before polling
    :raises Exception: if any node has not finished booting before the timeout
    """
    host_ips = config_utils.get_node_ips_from_config(boot_conf)
    host_ip_status = dict()
    for host_ip in host_ips:
        host_ip_status[host_ip] = False
    if suspend > 0:
        logger.info('Waiting %s seconds before polling IPs %s for SSH',
                    suspend, host_ips)
        time.sleep(suspend)
    user = config_utils.get_node_user(boot_conf)
    password = config_utils.get_node_pass(boot_conf)
    logger.info('Checking nodes for SSH on %s, user - [%s], pass - [%s]',
                host_ips, user, password)
    all_completed = True
    start = time.time()
    while timeout > time.time() - start:
        all_completed = True
        for host_ip in host_ips:
            if not host_ip_status[host_ip]:
                logger.debug(
                    'Attempting to obtain ssh client - IP [%s], user - [%s],'
                    ' pass - [%s]', host_ip, user, password)
                ssh_client = ssh_utils.ssh_client(
                    host_ip, user, password=password)
                if ssh_client:
                    logger.info('Obtained ssh client to IP [%s]', host_ip)
                    if __drp_boot_complete(ssh_client):
                        host_ip_status[host_ip] = True
        for host_ip, status in host_ip_status.items():
            if not status:
                all_completed = False
                continue
        if all_completed:
            break
        time.sleep(10)
    if not all_completed:
        logger.error('Timeout connecting to all nodes - %s', host_ips)
        raise Exception('Timeout waiting for nodes to finish booting')
    logger.info('Connected to all nodes')

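__block_until_complete() is essentially a bounded polling loop over the nodes' SSH endpoints. A stdlib-only sketch of the same loop, with an assumed host list, in case the ssh_utils helpers are not available:

import socket
import time

def wait_for_ssh(hosts, timeout=1800, interval=10, port=22):
    """Poll the given hosts until their SSH ports accept connections."""
    pending = set(hosts)
    start = time.time()
    while pending and time.time() - start < timeout:
        for host in list(pending):
            try:
                with socket.create_connection((host, port), timeout=5):
                    pending.discard(host)
            except OSError:
                pass
        if pending:
            time.sleep(interval)
    if pending:
        raise Exception(
            'Timeout waiting for nodes to finish booting - {}'.format(pending))

# Example with assumed addresses: wait_for_ssh(['10.0.0.11', '10.0.0.12'], 300)
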
Python

def undeploy(boot_conf):
    """
    Cleans up the PXE imaged machines
    :param boot_conf: boot configuration dict
    :raises: Exception should snaps-kubernetes fail to undeploy successfully
    """
    rebar_utils.cleanup_drp(consts.REBAR_SESSION, boot_conf)
    # TODO/FIXME - add pb to delete contents of /tmp and ~/.ssh/known_hosts

Python

def __installation_logs(cmdln_args):
    """
    Initializes the logging for the Kubernetes installation
    :param cmdln_args: the command line arguments
    """
    level_value = cmdln_args.log_level
    log_file_name = consts.K8_INSTALLATION_LOGS
    if level_value.upper() == 'INFO':
        level_value = logging.INFO
    elif level_value.upper() == 'ERROR':
        level_value = logging.ERROR
    elif level_value.upper() == 'DEBUG':
        level_value = logging.DEBUG
    elif level_value.upper() == 'WARNING':
        level_value = logging.WARNING
    elif level_value.upper() == 'CRITICAL':
        level_value = logging.CRITICAL
    else:
        print("Incorrect log level %s received as input from user" %
              level_value)
        exit(1)
    logger.setLevel(level_value)
    log_output = cmdln_args.log_out
    if log_output == 'stderr':
        logging.basicConfig(level=logging.DEBUG)
    elif log_output == 'stdout':
        logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    else:
        logging.basicConfig(
            format='%(asctime)s %(levelname)s [%(filename)s:'
                   '%(lineno)s - %(funcName)2s() ] %(message)s ',
            datefmt='%b %d %H:%M', filename=log_file_name, filemode='w',
            level=level_value)
        logging.getLogger().addHandler(logging.StreamHandler())

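The level-name dispatch above can also be expressed with getattr against the logging module; a compact, equivalent sketch (not the project's code):

import logging

def parse_log_level(name):
    """Map a level name such as 'debug' to its logging constant."""
    level = getattr(logging, name.upper(), None)
    if not isinstance(level, int):
        raise ValueError(
            'Incorrect log level %s received as input from user' % name)
    return level

assert parse_log_level('warning') == logging.WARNING
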
Python

def __manage_keys(config):
    """
    Creates and pushes SSH keys when necessary
    :param config: the deployment configuration dict
    """
    logger.info('Managing SSH keys')
    nodes_info = config_utils.get_nodes_ip_name_type(config)
    for hostname, ip, node_type in nodes_info:
        ssh_client = ssh_utils.ssh_client(ip, 'root')
        if not ssh_client:
            logger.debug('Creating and injecting key to %s', ip)
            password = config_utils.get_node_password(config, hostname)
            ansible_utils.apply_playbook(consts.MANAGE_KEYS, variables={
                'ip': ip, 'password': password})
        else:
            logger.debug('Key already exists')
    docker_repo = config_utils.get_docker_repo(config)
    if docker_repo and isinstance(docker_repo, dict):
        ip = docker_repo[consts.IP_KEY]
        ssh_client = ssh_utils.ssh_client(ip, 'root')
        if not ssh_client:
            logger.debug('Creating and injecting key to %s', ip)
            password = docker_repo[consts.PASSWORD_KEY]
            ansible_utils.apply_playbook(consts.MANAGE_KEYS, variables={
                'ip': ip, 'password': password})
        else:
            logger.debug('Key already exists')

Python

def run(arguments):
    """
    Launches the provisioning of bare metal & IaaS.
    A PXE-based configuration is used to provision the bare metal; for IaaS
    provisioning, different deployment models are supported. The relevant
    conf files for PXE-based HW provisioning & IaaS must be present in the
    ./conf folder.
    :param arguments: the command line options entered by the user for the
                      relevant operations
    """
    __installation_logs(arguments)
    logger.info('Launching Operation Starts ........')
    dir_path = os.path.dirname(os.path.realpath(__file__))
    export_path = dir_path + "/"
    os.environ['CWD_IAAS'] = export_path
    logger.info('Current Exported Relevant Path - %s', export_path)
    config_file = os.path.abspath(os.path.expanduser(arguments.config))
    config = file_utils.read_yaml(config_file)
    logger.info('Read configuration file - %s', config_file)
    __manage_keys(config)
    if arguments.deploy_kubernetes:
        __launcher_conf()
        validate_deployment_file(config)
        k8_utils.execute(config)
    if arguments.clean_kubernetes:
        k8_utils.clean_k8(config)

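run() only touches a handful of attributes on the arguments object: config, log_level, log_out, deploy_kubernetes and clean_kubernetes. Below is a hedged argparse sketch that produces a compatible namespace; the flag spellings and defaults are assumptions, only the attribute names come from the code above:

import argparse

parser = argparse.ArgumentParser(
    description='snaps-kubernetes launcher (sketch)')
parser.add_argument('--config', required=True,
                    help='path to the deployment configuration YAML')
parser.add_argument('--log-level', dest='log_level', default='INFO')
parser.add_argument('--log-out', dest='log_out', default='file')
parser.add_argument('--deploy_kubernetes', action='store_true')
parser.add_argument('--clean_kubernetes', action='store_true')

arguments = parser.parse_args()
run(arguments)
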
Python

def validate_nodes(k8s_conf):
    """
    Validation of the configured kubernetes nodes
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :raises Exception
    """
    logger.info('Validate K8 Nodes')
    core_client = k8s_core_client(k8s_conf)
    node_list = core_client.list_node()
    node_items = node_list.items
    masters_tuple3 = config_utils.get_master_nodes_ip_name_type(k8s_conf)
    master_names = list()
    for name, ip, node_type in masters_tuple3:
        master_names.append(name)
    minions_tuple3 = config_utils.get_minion_nodes_ip_name_type(k8s_conf)
    minion_names = list()
    for name, ip, node_type in minions_tuple3:
        minion_names.append(name)
    master_count = 0
    for node_item in node_items:
        node_meta = node_item.metadata
        node_status = node_item.status
        node_conditions = node_status.conditions
        kubelet_reason = False
        for node_condition in node_conditions:
            if node_condition.reason == 'KubeletReady':
                if node_condition.status != 'True':
                    raise ClusterDeploymentException(
                        'node_condition.status is [{}]'.format
                        (node_condition.status))
                if node_condition.type != 'Ready':
                    raise ClusterDeploymentException(
                        'node_condition.type is [{}]'.format(
                            node_condition.type))
                kubelet_reason = True
        if not kubelet_reason:
            raise ClusterDeploymentException(
                'Could not determine the state of all nodes')
        node_info = node_status.node_info
        node_kubelet_version = node_info.kubelet_version
        expected_version = config_utils.get_version(k8s_conf)
        if node_kubelet_version != expected_version:
            raise ClusterDeploymentException(
                'Unexpected kubelet_version [{}] - expected [{}]'.format(
                    node_kubelet_version, expected_version))
        logger.debug('Expected version [%s] == actual [%s]',
                     expected_version, node_kubelet_version)
        node_name = node_meta.name
        node_labels = node_meta.labels
        if node_labels.get('node-role.kubernetes.io/master') is not None:
            if node_name not in master_names:
                raise ClusterDeploymentException(
                    'Node [{}] is not a master'.format(node_name))
            master_count += 1
            logger.debug('Master found with name [%s]', node_name)
        # if node_labels.get('node-role.kubernetes.io/node') is not None:
        #     if node_name not in minion_names:
        #         raise ClusterDeploymentException(
        #             'Node [{}] is not a minion'.format(node_name))
        #
        #     minion_count += 1
        #     logger.debug('Minion found with name [%s]', node_name)
    if master_count != len(masters_tuple3):
        raise ClusterDeploymentException(
            'Expected number of masters [{}] - actual [{}]'.format(
                len(masters_tuple3), master_count))
    logger.debug('Number of masters [%s]', master_count)
    # if minion_count != len(minions_tuple3):
    #     raise ClusterDeploymentException(
    #         'Expected number of minions [{}] - actual [{}]'.format(
    #             len(minions_tuple3), minion_count))
    # logger.debug('Number of minions [%s]', minion_count)

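The readiness check in validate_nodes() hinges on the KubeletReady condition and the node labels returned by the Kubernetes API. A standalone sketch using the official Python client against whatever cluster the local kubeconfig points to:

from kubernetes import client, config

config.load_kube_config()
core = client.CoreV1Api()
for node in core.list_node().items:
    ready = any(cond.reason == 'KubeletReady' and cond.status == 'True'
                for cond in node.status.conditions)
    is_master = 'node-role.kubernetes.io/master' in (node.metadata.labels or {})
    print(node.metadata.name,
          node.status.node_info.kubelet_version,
          'master' if is_master else 'worker',
          'Ready' if ready else 'NotReady')
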
Python

def validate_k8s_system(k8s_conf):
    """
    Validation of the configured kubernetes system
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :raises Exception
    """
    logger.info('Validate K8s System')
    core_client = k8s_core_client(k8s_conf)
    pod_items = validate_pods_by_namespace(core_client, 'kube-system')
    pod_services = __get_pod_service_list(pod_items)
    logger.debug('kube-system pod_services - %s', pod_services)
    if 'kubernetes-dashboard' not in pod_services:
        raise ClusterDeploymentException(
            'kubernetes-dashboard service not found')
    if 'coredns' not in pod_services:
        raise ClusterDeploymentException('coredns service not found')
    for name, ip, node_type in config_utils.get_master_nodes_ip_name_type(
            k8s_conf):
        if 'kube-apiserver-{}'.format(name) not in pod_services:
            raise ClusterDeploymentException(
                'kube-apiserver-{} service not found'.format(name))
        if 'kube-scheduler-{}'.format(name) not in pod_services:
            raise ClusterDeploymentException(
                'kube-scheduler-{} service not found'.format(name))
    if config_utils.is_metrics_server_enabled(k8s_conf):
        if 'metrics-server' not in pod_services:
            raise ClusterDeploymentException(
                'metrics-server service not found')
    logger.debug('pod_services - %s', pod_services)
    if config_utils.is_helm_enabled(k8s_conf):
        if 'tiller' not in pod_services:
            raise ClusterDeploymentException(
                'tiller service not found')

Python

def validate_rook(k8s_conf):
    """
    Validation of the expected rook services
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :raises Exception
    """
    logger.info('Validate rook-ceph services')
    core_client = k8s_core_client(k8s_conf)
    validate_pods_by_namespace(core_client, 'rook-ceph-system')
    storage_class_names = __get_storageclass_names(k8s_conf)
    logger.debug('storage_class_names - %s', storage_class_names)
    if 'rook-ceph-block' not in storage_class_names:
        raise ClusterDeploymentException(
            'Storage class rook-ceph-block is not found')

Python

def validate_cni(k8s_conf):
    """
    Validation of the configured kubernetes CNIs and network elements
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :raises Exception
    """
    logger.info('Validate K8s CNIs')
    __validate_cni_pods(k8s_conf)
    __validate_cni_networks(k8s_conf)

Python

def __validate_cni_pods(k8s_conf):
    """
    Validates that the expected CNI pods are running
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :raises Exception
    """
    logger.info('Validate K8s CNI Pods')
    core_client = k8s_core_client(k8s_conf)
    pod_items = validate_pods_by_namespace(core_client, 'kube-system')
    pod_services = __get_pod_service_list(pod_items)
    logger.debug('pod_services - %s', pod_services)
    net_plugin = config_utils.get_networking_plugin(k8s_conf)
    if net_plugin == consts.WEAVE_TYPE:
        if 'weave-net' not in pod_services:
            raise ClusterDeploymentException('weave-net service not found')
    elif net_plugin == consts.FLANNEL_TYPE:
        if 'flannel' not in pod_services:
            raise ClusterDeploymentException('flannel service not found')
    elif net_plugin == 'contiv':
        if 'contiv-netplugin' not in pod_services:
            raise ClusterDeploymentException(
                'contiv-netplugin service not found')
    elif net_plugin == 'calico':
        if 'calico-kube-controllers' not in pod_services:
            raise ClusterDeploymentException(
                'calico-kube-controllers service not found')
    elif net_plugin == 'cilium':
        if 'cilium-net' not in pod_services:
            raise ClusterDeploymentException('cilium-net service not found')

Python

def __validate_cni_networks(k8s_conf):
    """
    Validates that the expected CNI networks have been deployed
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :raises Exception
    """
    logger.info('Validate K8s CNI Networks')
    net_client = k8s_net_client(k8s_conf)
    net_policies = net_client.list_network_policy_for_all_namespaces()
    logger.debug('net_policies - %s', net_policies)
    custom_obj_client = k8s_custom_client(k8s_conf)
    policies = custom_obj_client.list_cluster_custom_object(
        'networking.k8s.io', 'v1', 'networkpolicies')
    logger.debug('policies - %s', policies)
    # TODO/FIXME - Once overlay network objects are being created, attempt to
    # TODO/FIXME - query and validate here

Python

def validate_pods_by_namespace(core_client, namespace):
    """
    Validates that all of the pods for a given namespace are operational
    :param core_client: the kubernetes API client
    :param namespace: the namespace of the pods to validate and return
    :return: list of pod item objects
    """
    pod_items = __get_pods_by_namespace(core_client, namespace)
    pod_status = __get_pod_name_statuses(pod_items)
    for pod_name, pod_running in pod_status.items():
        if not pod_running:
            raise ClusterDeploymentException(
                'Pod [{}] is not running as expected'.format(pod_name))
    return pod_items

Python

def validate_volumes(k8s_conf):
    """
    Validation of the configured kubernetes volumes
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :raises Exception
    """
    __validate_host_vols(k8s_conf)
    # TODO/FIXME - Add Ceph volume check after Ceph support has been fixed
    __validate_rook_vols(k8s_conf)

Python

def validate_secrets(k8s_conf):
    """
    Validation that the configured kubernetes secrets have been created
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :raises Exception
    """
    logger.info('Validating secrets')
    core_client = k8s_core_client(k8s_conf)
    deploy_secrets = core_client.list_secret_for_all_namespaces()
    logger.debug('Secrets - %s', deploy_secrets)
    secret_names = []
    secret_dict = {}
    for secret in deploy_secrets.items:
        secret_names.append(secret.metadata.name)
        secret_dict[secret.metadata.name] = secret
    logger.debug('secret_names - %s', secret_names)
    config_secrets = config_utils.get_secrets(k8s_conf)
    logger.debug('config_secrets - %s', config_secrets)
    if not config_secrets:
        config_secrets = []
    for config_secret in config_secrets:
        if not config_secret['name'] in secret_dict.keys():
            raise ClusterDeploymentException(
                'Secret name [{}] not in secret_names [{}]'.format(
                    config_secret['name'], secret_names))
        else:
            encoded_secret = secret_dict[config_secret['name']].data.get(
                '.dockerconfigjson')
            logger.debug('encoded_secret - %s', encoded_secret)
            decoded_secret_str = base64.b64decode(encoded_secret)
            decoded_secret = json.loads(decoded_secret_str)
            logger.debug('decoded_secret - %s', decoded_secret)
            if decoded_secret['auths'].get(config_secret['server']):
                decoded_secret_values = decoded_secret[
                    'auths'][config_secret['server']]
                logger.debug('decoded_secret_values - %s',
                             decoded_secret_values)
                if (decoded_secret_values['username'] != config_secret[
                        'user'] or
                        decoded_secret_values['password'] != config_secret[
                            'password'] or
                        decoded_secret_values['email'] != config_secret[
                            'email']):
                    raise ClusterDeploymentException(
                        'Decoded secret [{}] not expected [{}]'.format(
                            decoded_secret_values, config_secret))
            else:
                raise ClusterDeploymentException(
                    'Could not decode created secret [{}]'.format(
                        config_secret))

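The payload validated above is a standard .dockerconfigjson document: base64-encoded JSON with per-registry credentials under an 'auths' key. A sketch of encoding and decoding one; the registry and credential values are made-up sample data:

import base64
import json

sample = {
    'auths': {
        'registry.example.com': {
            'username': 'deployer',
            'password': 'secret',
            'email': 'deployer@example.com',
            'auth': base64.b64encode(b'deployer:secret').decode(),
        }
    }
}
encoded_secret = base64.b64encode(json.dumps(sample).encode()).decode()

decoded_secret = json.loads(base64.b64decode(encoded_secret))
creds = decoded_secret['auths']['registry.example.com']
print(creds['username'], creds['email'])
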
Python

def __validate_host_vols(k8s_conf):
    """
    Validation of the configured kubernetes volumes
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :raises Exception
    """
    logger.info('Validate K8s Host Volumes')
    pv_names = __get_pv_names(k8s_conf)
    host_vol_conf = __get_host_vol_dict(k8s_conf)
    for name, size in host_vol_conf.items():
        if name not in pv_names:
            raise ClusterDeploymentException(
                'Config for host volume [{}] not found'.format(name))
        else:
            pv_attrs = __get_pv_attrs(k8s_conf, name)
            if not pv_attrs[0].startswith(str(size)):
                raise ClusterDeploymentException(
                    'PV [{}] expected size is [{}] not [{}]'.format(
                        name, size, pv_attrs[0]))
    core_client = k8s_core_client(k8s_conf)
    pv_claims = core_client.list_persistent_volume_claim_for_all_namespaces()
    for pv_claim in pv_claims.items:
        for name, size in host_vol_conf.items():
            if pv_claim.metadata.name == name:
                actual_size = pv_claim.spec.resources.requests['storage']
                logger.debug('claim %s expected size - %s | actual_size - %s',
                             name, size, actual_size)
                if actual_size != size:
                    raise ClusterDeploymentException(
                        'Expected size of PV claim [{}] of [{}] not equal '
                        'to [{}]'.format(name, size, actual_size))

Python

def __validate_rook_vols(k8s_conf):
    """
    Validation of the configured kubernetes volumes
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :raises Exception
    """
    logger.info('Validate K8s Rook Volumes')
    if config_utils.is_rook_enabled(k8s_conf):
        pv_names = __get_pv_names(k8s_conf)
        logger.debug('pv_names - %s', pv_names)
        for name, size, path in config_utils.get_rook_vol_info(k8s_conf):
            logger.debug('name - %s, size - %s, path - %s', name, size, path)
            if name not in pv_names:
                raise ClusterDeploymentException(
                    'Rook PV [{}] not found'.format(name))
            else:
                pv_attrs = __get_pv_attrs(k8s_conf, name)
                if not pv_attrs[0].startswith(str(size)):
                    raise ClusterDeploymentException(
                        'PV [{}] expected size is [{}] not [{}]'.format(
                            name, size, pv_attrs[0]))
                if pv_attrs[1] != path:
                    raise ClusterDeploymentException(
                        'PV [{}] expected path is [{}] not [{}]'.format(
                            name, path, pv_attrs[1]))

Python

def __get_pv_names(k8s_conf):
    """
    Returns a list of names of the deployed persistent volumes
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :return: a list of names
    """
    out_names = list()
    core_client = k8s_core_client(k8s_conf)
    pv_list = core_client.list_persistent_volume()
    for pv in pv_list.items:
        out_names.append(pv.metadata.name)
    return out_names

Python

def __get_pv_attrs(k8s_conf, pv_name):
    """
    Returns the attributes for a given PV
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :param pv_name: the name of the persistent volume to query
    :return: a tuple where the first element is the size and the second is the
             path, or (None, None) if not found
    """
    core_client = k8s_core_client(k8s_conf)
    pv_list = core_client.list_persistent_volume()
    logger.debug('pv_list - %s', pv_list)
    for pv in pv_list.items:
        logger.debug('pv - %s', pv)
        if pv.metadata.name == pv_name:
            return pv.spec.capacity.get('storage'), pv.spec.host_path.path
    return None, None

Python

def __get_pods_by_namespace(core_client, namespace):
    """
    Retrieves the pods for a given namespace
    :param core_client: the kubernetes API client
    :param namespace: the namespace of the pod to add into the return list
    :return: list of pod item objects
    """
    out_pods = list()
    pod_list = core_client.list_namespaced_pod(namespace=namespace)
    pod_items = pod_list.items
    for pod_item in pod_items:
        pod_meta = pod_item.metadata
        if pod_meta.namespace == namespace:
            out_pods.append(pod_item)
    return out_pods | 
	def __get_pods_by_namespace(core_client, namespace):
    """
    Retrieves the pods for a given namespace
    :param core_client: the kubernetes API client
    :param namespace: the namespace of the pod to add into the return list
    :return: list of pod item objects
    """
    out_pods = list()
    pod_list = core_client.list_namespaced_pod(namespace=namespace)
    pod_items = pod_list.items
    for pod_item in pod_items:
        pod_meta = pod_item.metadata
        if pod_meta.namespace == namespace:
            out_pods.append(pod_item)
    return out_pods | 
| 
	Python | 
	def __get_service_names(core_client, namespace):
    """
    Retrieves the services for a given namespace
    :param core_client: the kubernetes API client
    :param namespace: the namespace of the services to add into the return list
    :return: list of service names
    """
    out_names = list()
    srvc_list = core_client.list_namespaced_service(namespace)
    for srvc in srvc_list.items:
        out_names.append(srvc.metadata.name)
    return out_names | 
	def __get_service_names(core_client, namespace):
    """
    Retrieves the services for a given namespace
    :param core_client: the kubernetes API client
    :param namespace: the namespace of the services to add into the return list
    :return: list of service names
    """
    out_names = list()
    srvc_list = core_client.list_namespaced_service(namespace)
    for srvc in srvc_list.items:
        out_names.append(srvc.metadata.name)
    return out_names | 
| 
	Python | 
	def __get_pod_name_statuses(pod_items):
    """
    Returns a dict where the key is the name of a pod and the value is a flag
    where False indicates that the container is in a waiting state
    :param pod_items: the list of pod_items from which to extract the name
    :return: dict of pod names/status codes
    """
    out_dict = dict()
    for pod_item in pod_items:
        cont_stat = pod_item.status.container_statuses[0]
        out_dict[pod_item.metadata.name] = cont_stat.state.waiting is None
        if cont_stat.state.waiting is not None:
            logger.warning('pod_item.status.container_statuses - \n%s',
                        pod_item.status.container_statuses)
    return out_dict | 
	def __get_pod_name_statuses(pod_items):
    """
    Returns a dict where the key is the name of a pod and the value is a flag
    where False indicates that the container is in a waiting state
    :param pod_items: the list of pod_items from which to extract the name
    :return: dict of pod names/status codes
    """
    out_dict = dict()
    for pod_item in pod_items:
        cont_stat = pod_item.status.container_statuses[0]
        out_dict[pod_item.metadata.name] = cont_stat.state.waiting is None
        if cont_stat.state.waiting is not None:
            logger.warn('pod_item.status.container_statuses - \n%s',
                        pod_item.status.container_statuses)
    return out_dict | 
| 
	Python | 
	def __get_pod_service_list(pod_items):
    """
    Returns a set of pod service_account names from the pod_list parameter
    :param pod_items: the list of pod_items from which to extract the name
    :return: set of pod names
    """
    out_names = set()
    for pod_item in pod_items:
        if pod_item.spec.service_account:
            out_names.add(pod_item.spec.service_account)
        else:
            out_names.add(pod_item.metadata.name)
    return out_names | 
	def __get_pod_service_list(pod_items):
    """
    Returns a set of pod service_account names from the pod_list parameter
    :param pod_items: the list of pod_items from which to extract the name
    :return: set of pod names
    """
    out_names = set()
    for pod_item in pod_items:
        if pod_item.spec.service_account:
            out_names.add(pod_item.spec.service_account)
        else:
            out_names.add(pod_item.metadata.name)
    return out_names | 
| 
	Python | 
	def __get_storageclass_names(k8s_conf):
    """
    Retrieves the names of all of the deployed storage classes
    :param k8s_conf: the kubernetes configuration
    :return: list of storageclass names
    """
    out_names = list()
    storage_client = k8s_storage_client(k8s_conf)
    storage_classes = storage_client.list_storage_class()
    storage_items = storage_classes.items
    for storage_item in storage_items:
        storage_meta = storage_item.metadata
        out_names.append(storage_meta.name)
    return out_names | 
	def __get_storageclass_names(k8s_conf):
    """
    Retrieves the names of all of the deployed storage classes
    :param k8s_conf: the kubernetes configuration
    :return: list of storageclass names
    """
    out_names = list()
    storage_client = k8s_storage_client(k8s_conf)
    storage_classes = storage_client.list_storage_class()
    storage_items = storage_classes.items
    for storage_item in storage_items:
        storage_meta = storage_item.metadata
        out_names.append(storage_meta.name)
    return out_names | 
| 
	Python | 
	def __run(deploy_file):
    """
    Validates that the cluster has been properly deployed
    """
    k8s_conf = file_utils.read_yaml(deploy_file)
    validate_cluster.validate_all(k8s_conf) | 
	def __run(deploy_file):
    """
    Validates that the cluster has been properly deployed
    """
    k8s_conf = file_utils.read_yaml(deploy_file)
    validate_cluster.validate_all(k8s_conf) | 
| 
	Python | 
	def binary_to_decimal(binary):
    """Converts a binary number into a decimal"""
    reversed_binary = binary[::-1]    # i = correct power when reversed
    decimal = 0
    for i, value in enumerate(reversed_binary):
        if value == "0":
            continue  # ignore 0 because no value
        decimal += 2**i  # multiply 2 by i b/c index = power, add this value to decimal variable
    return decimal | 
	def binary_to_decimal(binary):
    """Converts a binary number into a decimal"""
    reversed_binary = binary[::-1]    # i = correct power when reversed
    decimal = 0
    for i, value in enumerate(reversed_binary):
        if value == "0":
            continue  # ignore 0 because no value
        decimal += 2**i  # multiply 2 by i b/c index = power, add this value to decimal variable
    return decimal | 
| 
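A short usage sketch of binary_to_decimal as defined above (the inputs are illustrative):

assert binary_to_decimal("1010") == 10       # 8 + 2
assert binary_to_decimal("0") == 0           # no set bits, nothing added
assert binary_to_decimal("11111111") == 255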
	Python | 
	def find_index(text, pattern):
    """Return the starting index of the first occurrence of pattern in text,
    or None if not found."""
    text_index = 0  # start at beginning of text
    if pattern == "":  # check if pattern contains anything
        return text_index
    # as long as there are still letters to look thru...
    while text_index != len(text):
        for i in range(len(pattern)):
            if text_index + i < len(text):
                # check letter from text against letter from pattern
                if text[text_index + i] != pattern[i]:
                    break  # stop if no match
                if i == len(pattern) - 1:
                    return text_index  # return index where pattern starts
        text_index += 1
    return None  # pattern was not found | 
	def find_index(text, pattern):
    """Return the starting index of the first occurrence of pattern in text,
    or None if not found."""
    text_index = 0  # start at beginning of text
    if pattern == "":  # check if pattern contains anything
        return text_index
    # as long as there are still letters to look thru...
    while text_index != len(text):
        for i in range(len(pattern)):
            if text_index + i < len(text):
                # check letter from text against letter from pattern
                if text[text_index + i] != pattern[i]:
                    break  # stop if no match
                if i == len(pattern) - 1:
                    return text_index  # return index where pattern starts
        text_index += 1
    return None  # pattern was not found | 
| 
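A few illustrative calls against the find_index above; an empty pattern matches at index 0 and a miss falls through to None:

assert find_index('abcabc', 'bc') == 1   # first occurrence only
assert find_index('abc', '') == 0        # empty pattern matches immediately
assert find_index('abc', 'xyz') is None  # no occurrence found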
	Python | 
	def find_all_indexes(text, pattern):
    """Return a list of starting indexes of all occurrences of pattern in text,
    or an empty list if not found."""
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    indexes = []  # keep track of all starting indexes
    text_index = 0
    # if pattern empty, append indexes for length of text
    if pattern == "":
        for i in range(len(text)):
            indexes.append(i)
    # as long as there are still letters to look thru...
    while text_index != len(text):
        for i in range(len(pattern)):
            if text_index + i < len(text):
                # check letter from text against letter from pattern
                if text[text_index + i] != pattern[i]:
                    break  # stop if no match
                if i == len(pattern) - 1:
                    # record the index where the pattern starts
                    indexes.append(text_index)
        text_index += 1  # move on to next letter in text
    return indexes | 
	def find_all_indexes(text, pattern):
    """Return a list of starting indexes of all occurrences of pattern in text,
    or an empty list if not found."""
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    indexes = []  # keep track of all starting indexes
    text_index = 0
    # if pattern empty, append indexes for length of text
    if pattern == "":
        for i in range(len(text)):
            indexes.append(i)
    # as long as there are still letters to look thru...
    while text_index != len(text):
        for i in range(len(pattern)):
            if text_index + i < len(text):
                # check letter from text against letter from pattern
                if text[text_index + i] != pattern[i]:
                    break  # stop if no match
                if i == len(pattern) - 1:
                    # record the index where the pattern starts
                    indexes.append(text_index)
        text_index += 1  # move on to next letter in text
    return indexes | 
| 
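Illustrative calls for find_all_indexes above; matches are allowed to overlap because the scan advances one character at a time:

assert find_all_indexes('abab', 'ab') == [0, 2]
assert find_all_indexes('aaa', 'aa') == [0, 1]   # overlapping matches are reported
assert find_all_indexes('abc', 'xyz') == []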
	Python | 
	def length(self):
        """Return the length of this linked list by traversing its nodes.
        Running time: O(n) because it finds the length linearly  """
    
        node_count = 0 # initialize node count at 0 
        node = self.head # start at beginning of ll
        # Loop until the node is None (one past tail)
        while node is not None:
            node_count += 1 # increase node count
            node = node.next # skip to next node
        return node_count | 
	def length(self):
        """Return the length of this linked list by traversing its nodes.
        Running time: O(n) because it finds the length linearly  """
    
        node_count = 0 # initialize node count at 0 
        node = self.head # start at beginning of ll
        # Loop until the node is None (one past tail)
        while node is not None:
            node_count += 1 # increase node count
            node = node.next # skip to next node
        return node_count | 
| 
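The O(n) traversal in length is just a walk down the next pointers; a self-contained sketch with a bare Node class (this Node is a stand-in, not necessarily the class the list above uses):

class Node:
    def __init__(self, data, next=None):
        self.data = data
        self.next = next

head = Node('a', Node('b', Node('c')))  # a -> b -> c

count = 0
node = head
while node is not None:  # same loop shape as length() above
    count += 1
    node = node.next
assert count == 3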
	Python | 
	def insert_at_index(self, index, item):
        """Insert the given item at the given index in this linked list, or
        raise ValueError if the given index is out of range of the list size.
        Best case running time: O(1) if list is empty or item is first
        Worst case running time: O(n) if index is later in list or not found"""
        # Check if the given index is out of range and if so raise an error
        if not (0 <= index <= self.size):
            raise ValueError('List index out of range: {}'.format(index))
    
        elif (index == 0): # if index 0, simply add item to beginning of linked list
            self.prepend(item)
        elif (index == self.size): # if index is length of linked list, simply add item to end of linked list
            self.append(item)
        
        else: # if neither of the above, create a new node for item
            new_node = Node(item)
            previous_node = self.head  # reset start of ll
            i = 0
            while i != (index - 1) and previous_node is not None:
                previous_node = previous_node.next
                i += 1
            next_node = previous_node.next
            new_node.next = next_node # reset next_node
            previous_node.next = new_node # reset new_node
            self.size += 1 | 
	def insert_at_index(self, index, item):
        """Insert the given item at the given index in this linked list, or
        raise ValueError if the given index is out of range of the list size.
        Best case running time: O(1) if list is empty or item is first
        Worst case running time: O(n) if index is later in list or not found"""
        # Check if the given index is out of range and if so raise an error
        if not (0 <= index <= self.size):
            raise ValueError('List index out of range: {}'.format(index))
    
        elif (index == 0): # if index 0, simply add item to beginning of linked list
            self.prepend(item)
        elif (index == self.size): # if index is length of linked list, simply add item to end of linked list
            self.append(item)
        
        else: # if neither of the above, create a new node for item
            new_node = Node(item)
            previous_node = self.head  # reset start of ll
            i = 0
            while i != (index - 1) and previous_node is not None:
                previous_node = previous_node.next
                i += 1
            next_node = previous_node.next
            new_node.next = next_node # reset next_node
            previous_node.next = new_node # reset new_node
            self.size += 1 | 
| 
	Python | 
	def append(self, item):
        """Insert the given item at the tail of this linked list.
        Best and worst case running time: O(1) because item is always inserted at the end"""
        new_node = Node(item) # make new node to hold item 
        if self.is_empty(): # check if ll is empty
            self.head = new_node # make new node the head
        else:
            self.tail.next = new_node # otherwise insert new node at end of ll
        self.tail = new_node # make last node our new node
        self.size += 1 | 
	def append(self, item):
        """Insert the given item at the tail of this linked list.
        Best and worst case running time: O(1) because item is always inserted at the end"""
        new_node = Node(item) # make new node to hold item 
        if self.is_empty(): # check if ll is empty
            self.head = new_node # make new node the head
        else:
            self.tail.next = new_node # otherwise insert new node at end of ll
        self.tail = new_node # make last node our new node
        self.size += 1 | 
| 
	Python | 
	def find(self, quality):
        """Return an item from this linked list satisfying the given quality.
        Best case running time: Omega(1) if item is near the head of the list.
        Worst case running time: O(n) if item is near the tail of the list or
        not present and we need to loop through all n nodes in the list."""
        node = self.head  # start at the head node
        # Loop until the node is None (one node past end)
        while node is not None:  
            # Check if this node's data satisfies given quality 
            if quality(node.data): 
                # exit if satisfied the quality
                return node.data 
            node = node.next  
        return None | 
	def find(self, quality):
        """Return an item from this linked list satisfying the given quality.
        Best case running time: Omega(1) if item is near the head of the list.
        Worst case running time: O(n) if item is near the tail of the list or
        not present and we need to loop through all n nodes in the list."""
        node = self.head  # start at the head node
        # Loop until the node is None (one node past end)
        while node is not None:  
            # Check if this node's data satisfies given quality 
            if quality(node.data): 
                # exit if satisfied the quality
                return node.data 
            node = node.next  
        return None | 
| 
	Python | 
	def replace(self, old_data, new_data):
        """Replace the given old_data in this linked list with given new_data
        using the same node, or raise ValueError if old_data is not found.
        Best case running time: O(1) if list is empty or item is first
        Worst case running time: O(n) if old_data is later in list or not found"""
        node = self.head # start at head node
        found = None
        # Loop until the node is None (one past end)
        while node is not None:  
            # Check if this node's data matches old_data
            if node.data == old_data:
                found = node # found node
                break
            # Skip to the next node
            node = node.next
        if found is None: # if we never find a matching node, raise error
            raise ValueError("value not found!")
        found.data = new_data | 
	def replace(self, old_data, new_data):
        """Replace the given old_data in this linked list with given new_data
        using the same node, or raise ValueError if old_data is not found.
        Best case running time: O(1) if list is empty or item is first
        Worst case running time: O(n) if old_data is later in list or not found"""
        node = self.head # start at head node
        found = None
        # Loop until the node is None (one past end)
        while node is not None:  
            # Check if this node's data matches old_data
            if node.data == old_data:
                found = node # found node
                break
            # Skip to the next node
            node = node.next
        if found is None: # if we never find a matching node, raise error
            raise ValueError("value not found!")
        found.data = new_data | 
| 
	Python | 
	def delete(self, item):
        """Delete the given item from this linked list, or raise ValueError.
        Best case running time: O(1) if list is empty or item is first
        Worst case running time: O(n) if item is not found or is later in the list"""
        node = self.head # start at head
        # Keep track of the node before node containing item
        previous = None
        
        found = False # Create variable to track if we have found item
        # Loop until we have found item or the node is None
        while not found and node is not None:
            if node.data == item: #Check if the node's data matches item
                found = True # update found if we find matching item
            else:
                previous = node   # Skip to the next node
                node = node.next 
        # Check if we found the given item or we never did and reached the tail
        if found:
            self.size -= 1
            # Check if we found node in the middle of ll
            if node is not self.head and node is not self.tail:
                # Update the previous node to skip around the found node
                previous.next = node.next
                node.next = None
            if node is self.head: # check if node found at head
                self.head = node.next # update head
                node.next = None
            if node is self.tail: # check if node at tail
                # Check if node exists before found node
                if previous is not None:
                    previous.next = None
                self.tail = previous # update tail 
        else:
            # otherwise raise error
            raise ValueError('Item not found: {}'.format(item)) | 
	def delete(self, item):
        """Delete the given item from this linked list, or raise ValueError.
        Best case running time: O(1) if list is empty or item is first
        Worst case running time: O(n) if item is not found or is later in the list"""
        node = self.head # start at head
        # Keep track of the node before node containing item
        previous = None
        
        found = False # Create variable to track if we have found item
        # Loop until we have found item or the node is None
        while not found and node is not None:
            if node.data == item: #Check if the node's data matches item
                found = True # update found if we find matching item
            else:
                previous = node   # Skip to the next node
                node = node.next 
        # Check if we found the given item or we never did and reached the tail
        if found:
            self.size -= 1
            # Check if we found node in the middle of ll
            if node is not self.head and node is not self.tail:
                # Update the previous node to skip around the found node
                previous.next = node.next
                node.next = None
            if node is self.head: # check if node found at head
                self.head = node.next # update head
                node.next = None
            if node is self.tail: # check if node at tail
                # Check if node exists before found node
                if previous is not None:
                    previous.next = None
                self.tail = previous # update tail 
        else:
            # otherwise raise error
            raise ValueError('Item not found: {}'.format(item)) | 
| 
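The delete method above hinges on keeping a previous pointer one node behind the cursor so the found node can be spliced out. A minimal, self-contained sketch of that idea against a bare Node chain (Node and delete_from_chain are illustrative stand-ins, not the class's actual API):

class Node:
    def __init__(self, data, next=None):
        self.data = data
        self.next = next

def delete_from_chain(head, item):
    previous, node = None, head
    while node is not None and node.data != item:
        previous, node = node, node.next   # advance both pointers together
    if node is None:
        raise ValueError('Item not found: {}'.format(item))
    if previous is None:                   # the head itself was matched
        return node.next
    previous.next = node.next              # splice around the found node
    return head

head = Node('a', Node('b', Node('c')))
head = delete_from_chain(head, 'b')
assert head.data == 'a' and head.next.data == 'c' and head.next.next is None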
	Python | 
	def contains(text, pattern):
    """Return a boolean indicating whether pattern occurs in text."""
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    # Reference find_index function to confirm if pattern exists
    return find_index(text, pattern) is not None | 
	def contains(text, pattern):
    """Return a boolean indicating whether pattern occurs in text."""
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    # Reference find_index function to confirm if pattern exists
    return find_index(text, pattern) is not None | 
| 
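Usage of contains above is a thin wrapper over find_index; for example:

assert contains('bananas', 'ana') is True
assert contains('bananas', 'xyz') is False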
	Python | 
	def find_index(text, pattern):
    """Return the starting index of the first occurrence of pattern in text,
    or None if not found."""
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    text_index = 0  # start at beginning of text
    if pattern == "":  # check if pattern contains anything
        return text_index
    # as long as there are still letters to look thru...
    while text_index != len(text):
        for i in range(len(pattern)):
            if text_index + i < len(text):
                # check letter from text against letter from pattern
                if text[text_index + i] != pattern[i]:
                    break  # stop if no match
                if i == len(pattern) - 1:
                    return text_index  # return index where pattern starts
        text_index += 1
    return None  # pattern was not found | 
	def find_index(text, pattern):
    """Return the starting index of the first occurrence of pattern in text,
    or None if not found."""
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    text_index = 0  # start at beginning of text
    if pattern == "":  # check if pattern contains anything
        return text_index
    # as long as there are still letters to look thru...
    while text_index != len(text):
        for i in range(len(pattern)):
            if text_index + i < len(text):
                # check letter from text against letter from pattern
                if text[text_index + i] != pattern[i]:
                    break  # stop if no match
                if i == len(pattern) - 1:
                    return text_index  # return index where pattern starts
        text_index += 1
    return None  # pattern was not found | 
| 
	Python | 
	def find_all_indexes(text, pattern):
    """Return a list of starting indexes of all occurrences of pattern in text,
    or an empty list if not found."""
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    indexes = []  # keep track of all starting indexes
    text_index = 0
    # if pattern empty, append indexes for length of text
    if pattern == "":
        for i in range(len(text)):
            indexes.append(i)
    # as long as there are still letters to look thru...
    while text_index != len(text):
        for i in range(len(pattern)):
            if text_index + i < len(text):
                # check letter from text against letter from pattern
                if text[text_index + i] != pattern[i]:
                    break  # stop if no match
                if i == len(pattern) - 1:
                    # record the index where the pattern starts
                    indexes.append(text_index)
        text_index += 1  # move on to next letter in text
    return indexes | 
	def find_all_indexes(text, pattern):
    """Return a list of starting indexes of all occurrences of pattern in text,
    or an empty list if not found."""
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    indexes = []  # keep track of all starting indexes
    text_index = 0
    # if pattern empty, append indexes for length of text
    if pattern == "":
        for i in range(len(text)):
            indexes.append(i)
    # as long as there are still letters to look thru...
    while text_index != len(text):
        for i in range(len(pattern)):
            if text_index + i < len(text):
                # check letter from text against letter from pattern
                if text[text_index + i] != pattern[i]:
                    break  # stop if no match
                if i == len(pattern) - 1:
                    # record the index where the pattern starts
                    indexes.append(text_index)
        text_index += 1  # move on to next letter in text
    return indexes | 
| 
	Python | 
	def is_palindrome_iterative(text):
    """is_palindrome_iterative return True if input text is a palindrome, and false if not"""
    # Start at either end of the word, work towards middle
    left_index = 0
    right_index = len(text)-1
    # Ensure middle hasn't been surpased
    while left_index <= right_index:
        if text[left_index] != text[right_index]:
            return False
        else:
            left_index += 1
            right_index -= 1
    return True | 
	def is_palindrome_iterative(text):
    """is_palindrome_iterative return True if input text is a palindrome, and false if not"""
    # Start at either end of the word, work towards middle
    left_index = 0
    right_index = len(text)-1
    # Ensure middle hasn't been surpased
    while left_index <= right_index:
        if text[left_index] != text[right_index]:
            return False
        else:
            left_index += 1
            right_index -= 1
    return True | 
| 
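A couple of illustrative calls; the comparison is character-exact, so case and spaces are not normalized here, and an empty string counts as a palindrome:

assert is_palindrome_iterative('racecar') is True
assert is_palindrome_iterative('python') is False
assert is_palindrome_iterative('') is True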
	Python | 
	def encode(number, base):
    """Encode given number in base 10 to digits in given base.
    number: int -- integer representation of number (in base 10)
    base: int -- base to convert to
    return: str -- string representation of number (in given base)"""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
    # Handle unsigned numbers only for now
    assert number >= 0, 'number is negative: {}'.format(number)
    encoded_result = ''
    # digits 0-9a-z are the first 36 characters of string.printable
    conversion = string.printable
    if number == 0:
        return '0'
    while number > 0:
        remainder = number % base
        number = number // base
        # prepend so the most significant digit ends up first
        encoded_result = conversion[remainder] + encoded_result
    return encoded_result | 
	def encode(number, base):
    """Encode given number in base 10 to digits in given base.
    number: int -- integer representation of number (in base 10)
    base: int -- base to convert to
    return: str -- string representation of number (in given base)"""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
    # Handle unsigned numbers only for now
    assert number >= 0, 'number is negative: {}'.format(number)
    encoded_result = ''
    # digits 0-9a-z are the first 36 characters of string.printable
    conversion = string.printable
    if number == 0:
        return '0'
    while number > 0:
        remainder = number % base
        number = number // base
        # prepend so the most significant digit ends up first
        encoded_result = conversion[remainder] + encoded_result
    return encoded_result | 
| 
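With the prepend fix above, encode produces digits most-significant first. The function relies on the string module being importable; a short usage sketch:

import string  # required by encode() above

assert encode(10, 2) == '1010'
assert encode(255, 16) == 'ff'
assert encode(0, 8) == '0'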
	Python | 
	def _resize(self, new_size=None):
        """Resize this hash table's buckets and rehash all key-value entries.
        Should be called automatically when load factor exceeds a threshold
        such as 0.75 after an insertion (when set is called with a new key).
        Best and worst case running time: O(n) because every entry must be
        rehashed into the new buckets.
        Best and worst case space usage: O(n) for the temporary list of
        entries plus the new list of buckets."""
        # If unspecified, choose new size dynamically based on current size
        if new_size is None:
            new_size = len(self.buckets) * 2  # Double size
        # Option to reduce size if buckets are sparsely filled (low load factor)
        elif new_size == 0:
            new_size = len(self.buckets) // 2  # Halve size
        # Get a list to temporarily hold all current key-value entries
        temp_entries = self.items()

        # Create a new list of new_size total empty linked list buckets
        self.__init__(new_size)
        # Insert each key-value entry into the new list of buckets,
        # which will rehash them into a new bucket index based on the new size
        for entry in temp_entries:
            key, value = entry
            self.set(key, value) | 
	def _resize(self, new_size=None):
        """Resize this hash table's buckets and rehash all key-value entries.
        Should be called automatically when load factor exceeds a threshold
        such as 0.75 after an insertion (when set is called with a new key).
        Best and worst case running time: O(n) because every entry must be
        rehashed into the new buckets.
        Best and worst case space usage: O(n) for the temporary list of
        entries plus the new list of buckets."""
        # If unspecified, choose new size dynamically based on current size
        if new_size is None:
            new_size = len(self.buckets) * 2  # Double size
        # Option to reduce size if buckets are sparsely filled (low load factor)
        elif new_size == 0:
            new_size = len(self.buckets) // 2  # Halve size
        # Get a list to temporarily hold all current key-value entries
        temp_entries = self.items()

        # Create a new list of new_size total empty linked list buckets
        self.__init__(new_size)
        # Insert each key-value entry into the new list of buckets,
        # which will rehash them into a new bucket index based on the new size
        for entry in temp_entries:
            key, value = entry
            self.set(key, value) | 
| 
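The core of _resize is the rehash: once the bucket count changes, every key can land in a different bucket, so all entries must be reinserted. A self-contained sketch of that idea with plain lists (the names here are illustrative, not the hash table's actual attributes):

def bucket_index(key, num_buckets):
    return hash(key) % num_buckets

entries = [('a', 1), ('b', 2), ('c', 3)]
old_buckets = [[] for _ in range(2)]
for k, v in entries:
    old_buckets[bucket_index(k, len(old_buckets))].append((k, v))

# doubling the bucket count requires rehashing every entry
new_buckets = [[] for _ in range(4)]
for bucket in old_buckets:
    for k, v in bucket:
        new_buckets[bucket_index(k, len(new_buckets))].append((k, v))

assert sum(len(b) for b in new_buckets) == len(entries)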
	Python | 
	def visit_Import(self, node, prefix='', level=0):
        """
        For imports of the form
            `from something import anything`
        prefix is set to "something".
        For imports of the form
            `from .relative import anything`
        level is set to a number indicating the number
        of parent directories (e.g. in this case level=1)
        """
        def handle_src_name(name):
            # Get the module name and prepend prefix if necessary
            src_name = name
            if prefix:
                src_name = prefix + "." + src_name
            return src_name
        def handle_scopes(imp_name, tgt_name, modname):
            def create_def(scope, name, imported_def):
                if name not in scope.get_defs():
                    def_ns = utils.join_ns(scope.get_ns(), name)
                    defi = self.def_manager.get(def_ns)
                    if not defi:
                        defi = self.def_manager.assign(def_ns, imported_def)
                    defi.get_name_pointer().add(imported_def.get_ns())
                    current_scope.add_def(name, defi)
            current_scope = self.scope_manager.get_scope(self.current_ns)
            imported_scope = self.scope_manager.get_scope(modname)
            if tgt_name == "*":
                for name, defi in imported_scope.get_defs().items():
                    create_def(current_scope, name, defi)
                    current_scope.get_def(name).get_name_pointer().add(defi.get_ns())
            else:
                # if it exists in the imported scope then copy it
                defi = imported_scope.get_def(imp_name)
                if not defi:
                    # maybe it's a full namespace
                    defi = self.def_manager.get(imp_name)
                if defi:
                    create_def(current_scope, tgt_name, defi)
                    current_scope.get_def(tgt_name).get_name_pointer().add(defi.get_ns())
        def add_external_def(name, target):
            # add an external def for the name
            defi = self.def_manager.get(name)
            if not defi:
                defi = self.def_manager.create(name, utils.constants.EXT_DEF)
            scope = self.scope_manager.get_scope(self.current_ns)
            if target != "*":
                # add a def for the target that points to the name
                tgt_ns = utils.join_ns(scope.get_ns(), target)
                tgt_defi = self.def_manager.get(tgt_ns)
                if not tgt_defi:
                    tgt_defi = self.def_manager.create(tgt_ns, utils.constants.EXT_DEF)
                tgt_defi.get_name_pointer().add(defi.get_ns())
                scope.add_def(target, tgt_defi)
        for import_item in node.names:
            src_name = handle_src_name(import_item.name)
            tgt_name = import_item.asname if import_item.asname else import_item.name
            imported_name = self.import_manager.handle_import(src_name, level)
            if not imported_name:
                add_external_def(src_name, tgt_name)
                continue
            fname = self.import_manager.get_filepath(imported_name)
            if not fname:
                add_external_def(src_name, tgt_name)
                continue
            # only analyze modules under the current directory
            if self.import_manager.get_mod_dir() in fname:
                if imported_name not in self.modules_analyzed:
                    self.analyze_submodule(imported_name)
                handle_scopes(import_item.name, tgt_name, imported_name)
            else:
                add_external_def(src_name, tgt_name)
        # handle all modules that were not analyzed
        for modname in self.import_manager.get_imports(self.modname):
            fname = self.import_manager.get_filepath(modname)
            if not fname:
                continue
            # only analyze modules under the current directory
            if (self.import_manager.get_mod_dir() in fname
                    and modname not in self.modules_analyzed):
                self.analyze_submodule(modname) | 
	def visit_Import(self, node, prefix='', level=0):
        """
        For imports of the form
            `from something import anything`
        prefix is set to "something".
        For imports of the form
            `from .relative import anything`
        level is set to a number indicating the number
        of parent directories (e.g. in this case level=1)
        """
        def handle_src_name(name):
            # Get the module name and prepend prefix if necessary
            src_name = name
            if prefix:
                src_name = prefix + "." + src_name
            return src_name
        def handle_scopes(imp_name, tgt_name, modname):
            def create_def(scope, name, imported_def):
                if name not in scope.get_defs():
                    def_ns = utils.join_ns(scope.get_ns(), name)
                    defi = self.def_manager.get(def_ns)
                    if not defi:
                        defi = self.def_manager.assign(def_ns, imported_def)
                    defi.get_name_pointer().add(imported_def.get_ns())
                    current_scope.add_def(name, defi)
            current_scope = self.scope_manager.get_scope(self.current_ns)
            imported_scope = self.scope_manager.get_scope(modname)
            if tgt_name == "*":
                for name, defi in imported_scope.get_defs().items():
                    create_def(current_scope, name, defi)
                    current_scope.get_def(name).get_name_pointer().add(defi.get_ns())
            else:
                # if it exists in the imported scope then copy it
                defi = imported_scope.get_def(imp_name)
                if not defi:
                    # maybe it's a full namespace
                    defi = self.def_manager.get(imp_name)
                if defi:
                    create_def(current_scope, tgt_name, defi)
                    current_scope.get_def(tgt_name).get_name_pointer().add(defi.get_ns())
        def add_external_def(name, target):
            # add an external def for the name
            defi = self.def_manager.get(name)
            if not defi:
                defi = self.def_manager.create(name, utils.constants.EXT_DEF)
            scope = self.scope_manager.get_scope(self.current_ns)
            if target != "*":
                # add a def for the target that points to the name
                tgt_ns = utils.join_ns(scope.get_ns(), target)
                tgt_defi = self.def_manager.get(tgt_ns)
                if not tgt_defi:
                    tgt_defi = self.def_manager.create(tgt_ns, utils.constants.EXT_DEF)
                tgt_defi.get_name_pointer().add(defi.get_ns())
                scope.add_def(target, tgt_defi)
        for import_item in node.names:
            src_name = handle_src_name(import_item.name)
            tgt_name = import_item.asname if import_item.asname else import_item.name
            imported_name = self.import_manager.handle_import(src_name, level)
            if not imported_name:
                add_external_def(src_name, tgt_name)
                continue
            fname = self.import_manager.get_filepath(imported_name)
            if not fname:
                add_external_def(src_name, tgt_name)
                continue
            # only analyze modules under the current directory
            if self.import_manager.get_mod_dir() in fname:
                if imported_name not in self.modules_analyzed:
                    self.analyze_submodule(imported_name)
                handle_scopes(import_item.name, tgt_name, imported_name)
            else:
                add_external_def(src_name, tgt_name)
        # handle all modules that were not analyzed
        for modname in self.import_manager.get_imports(self.modname):
            fname = self.import_manager.get_filepath(modname)
            if not fname:
                continue
            # only analyze modules under the current directory
            if (self.import_manager.get_mod_dir() in fname
                    and modname not in self.modules_analyzed):
                self.analyze_submodule(modname) | 
| 
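visit_Import above is driven by the fields of the ast import nodes; a quick look at what those fields hold for a relative import (the prefix and level arguments are presumably filled in from node.module and node.level by an ImportFrom wrapper):

import ast

node = ast.parse("from ..pkg import thing as alias").body[0]
print(type(node).__name__)   # ImportFrom
print(node.module)           # 'pkg'  -> would become the prefix
print(node.level)            # 2      -> number of leading dots
print(node.names[0].name)    # 'thing'
print(node.names[0].asname)  # 'alias' -> used as tgt_name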
	Python | 
	def onehot_encoder(X_train, cat_columns, X_test=None):
    """
    This function encodes categorical variables using the popular onehot
    method for each category.
    Parameters
    ----------
    X_train : pd.DataFrame
          A pandas dataframe representing the training data set containing
          some categorical features/columns.
    X_test : pd.DataFrame
          A pandas dataframe representing the test set, containing some set
          of categorical features/columns. This is an optional argument.
    cat_columns : list
          The names of the categorical features in X_train and/or X_test.
    Returns
    -------
    train_processed : pd.DataFrame
          The training set, with the categorical columns specified by the
          argument cat_columns replaced by their encodings.
    test_processed : pd.DataFrame
          The test set, with the categorical columns specified by the argument
          cat_columns replaced by the learned encodings from the training set.
          This is not returned if X_test is None.
    Examples
    -------
    >>> encodings = onehot_encoder(
    ...     my_train,
    ...     cat_columns=['foo'],
    ...     X_test=my_test)
    >>> train_new = encodings[0]
    """
    # check if input cat_columns is a list
    if not isinstance(cat_columns, list):
        raise Exception("cat_columns must be a list type")
    # Process X_train data
    if X_train is None:
        train_processed = X_train
    else:
        # check if input X_train is a data frame
        if not isinstance(X_train, pd.DataFrame):
            raise Exception("X_train must be a pandas Dataframe type")
        # add temporary data frame for X_train
        data = X_train
        results = pd.DataFrame(
            np.nan, index=np.arange(
                data.shape[0]), columns=['tobedeleted'])
        # Perform one hot encoding for training dataset
        for i in data.columns:
            if i in cat_columns:
                df = data[[i]]
                df.insert(df.shape[1], "values", 1.0)
                OH_df = df.pivot(values="values", columns=i).fillna(0)
                for j in OH_df.columns:
                    # rename columns
                    OH_df.rename({j: i + '_' + str(j)}, axis=1,
                                 inplace=True)
                # Add OH converted columns to results
                results = pd.concat([results, OH_df], axis=1)
            else:
                # Copy original to results
                results = pd.concat([results, data[[i]]], axis=1)
        # remove empty column created initially
        train_processed = results.drop(columns=['tobedeleted'])
    # Process X_test data
    if X_test is None:
        return train_processed
    else:
        # Check that input is valid
        if not isinstance(X_test, pd.DataFrame):
            raise Exception("X_test must be a pandas Dataframe type")
        # add temporary data frame for X_test
        data = X_test
        results = pd.DataFrame(
            np.nan, index=np.arange(
                data.shape[0]), columns=['tobedeleted'])
        # perform one hot encoding for testing dataset
        for i in data.columns:
            if i in cat_columns:
                df = data[[i]]
                df.insert(df.shape[1], "values", 1.0)
                OH_df = df.pivot(values="values", columns=i).fillna(0)
                for j in OH_df.columns:
                    # rename columns
                    OH_df.rename({j: i + '_' + str(j)}, axis=1,
                                 inplace=True)
                # Add OH converted columns to results
                results = pd.concat([results, OH_df], axis=1)
            else:
                # Copy original to results
                results = pd.concat([results, data[[i]]], axis=1)
        # remove empty column created initially
        test_processed = results.drop(columns=['tobedeleted'])
        return [train_processed, test_processed] | 
	def onehot_encoder(X_train, cat_columns, X_test=None):
    """
    This function encodes categorical variables using the popular onehot
    method for each category.
    Parameters
    ----------
    X_train : pd.DataFrame
          A pandas dataframe representing the training data set containing
          some categorical features/columns.
    X_test : pd.DataFrame
          A pandas dataframe representing the test set, containing some set
          of categorical features/columns. This is an optional argument.
    cat_columns : list
          The names of the categorical features in X_train and/or X_test.
    Returns
    -------
    train_processed : pd.DataFrame
          The training set, with the categorical columns specified by the
          argument cat_columns replaced by their encodings.
    test_processed : pd.DataFrame
          The test set, with the categorical columns specified by the argument
          cat_columns replaced by the learned encodings from the training set.
          This is not returned if X_test is None.
    Examples
    -------
    >>> encodings = onehot_encoder(
    ...     my_train,
    ...     cat_columns=['foo'],
    ...     X_test=my_test)
    >>> train_new = encodings[0]
    """
    # check if input cat_columns is a list
    if not isinstance(cat_columns, list):
        raise Exception("cat_columns must be a list type")
    # Process X_train data
    if X_train is None:
        train_processed = X_train
    else:
        # check if input X_train is a data frame
        if not isinstance(X_train, pd.DataFrame):
            raise Exception("X_train must be a pandas Dataframe type")
        # add temporary data frame for X_train
        data = X_train
        results = pd.DataFrame(
            np.nan, index=np.arange(
                data.shape[0]), columns=['tobedeleted'])
        # Perform one hot encoding for training dataset
        for i in data.columns:
            if i in cat_columns:
                df = data[[i]]
                df.insert(df.shape[1], "values", 1.0)
                OH_df = df.pivot(values="values", columns=i).fillna(0)
                for j in OH_df.columns:
                    # rename columns
                    OH_df.rename({j: i + '_' + str(j)}, axis=1,
                                 inplace=True)
                # Add OH converted columns to results
                results = pd.concat([results, OH_df], axis=1)
            else:
                # Copy original to results
                results = pd.concat([results, data[[i]]], axis=1)
        # remove empty column created initially
        train_processed = results.drop(columns=['tobedeleted'])
    # Process X_test data
    if X_test is None:
        return train_processed
    else:
        # Check that input is valid
        if not isinstance(X_test, pd.DataFrame):
            raise Exception("X_test must be a pandas Dataframe type")
        # add temporary data frame for X_test
        data = X_test
        results = pd.DataFrame(
            np.nan, index=np.arange(
                data.shape[0]), columns=['tobedeleted'])
        # perform one hot encoding for testing dataset
        for i in data.columns:
            if i in cat_columns:
                df = data[[i]]
                df.insert(df.shape[1], "values", 1.0)
                OH_df = df.pivot(values="values", columns=i).fillna(0)
                for j in OH_df.columns:
                    # rename columns
                    OH_df.rename({j: i + '_' + str(j)}, axis=1,
                                 inplace=True)
                # Add OH converted columns to results
                results = pd.concat([results, OH_df], axis=1)
            else:
                # Copy original to results
                results = pd.concat([results, data[[i]]], axis=1)
        # remove empty column created initially
        test_processed = results.drop(columns=['tobedeleted'])
        return [train_processed, test_processed] | 
| 
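A small, hypothetical usage of onehot_encoder above (the function assumes pandas and numpy are imported as pd/np alongside it); the exact column order shown is illustrative:

import numpy as np
import pandas as pd

my_train = pd.DataFrame({'foo': ['a', 'b', 'a'], 'x': [1, 2, 3]})
my_test = pd.DataFrame({'foo': ['b', 'a'], 'x': [4, 5]})

train_new, test_new = onehot_encoder(my_train, cat_columns=['foo'], X_test=my_test)
print(list(train_new.columns))  # e.g. ['foo_a', 'foo_b', 'x']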
	Python | 
	def conjugate_encoder(
        X_train,
        y,
        cat_columns,
        prior_params,
        X_test=None,
        objective="regression"):
    """This function encodes categorical variables by fitting a posterior
    distribution per each category to the target variable y, using a known
    conjugate-prior. The resulting mean(s) of each posterior distribution per
    each category are used as the encodings.
    Parameters
    ----------
    X_train : pd.DataFrame
          A pandas dataframe representing the training data set containing
          some categorical features/columns.
    y : pd.Series
          A pandas series representing the target variable. If the objective
          is "binary", then this series should only contain two unique
          values.
    cat_columns : list
          The names of the categorical features in X_train and/or X_test.
    prior_params: dict
          A dictionary of parameters for each prior distribution assumed.
          For regression, this requires a dictionary with four keys and four
          values: mu, vega, alpha, beta. All must be real numbers, and must
          be greater than 0 except for mu, which can be negative. A value of
          alpha > 1 is strongly advised. For binary classification,
          this requires a dictionary with two keys and two
          values: alpha, beta. All must be real numbers and be greater than 0.
    X_test : pd.DataFrame
          A pandas dataframe representing the test set, containing some set
          of categorical features/columns. This is an optional argument.
    objective : str
          A string, either "regression" or "binary" specifying the problem.
          Default is regression. For regression, a normal-inverse gamma
          prior + normal likelihood is assumed. For binary classification, a
          beta prior with binomial likelihood is assumed.
    Returns
    -------
    train_processed : pd.DataFrame
          The training set, with the categorical columns specified by the
          argument cat_columns replaced by their encodings. For regression,
          the encodings will return 2 columns, since the normal-inverse gamma
          distribution is two dimensional. For binary classification, the
          encodings will return 1 column.
    test_processed : pd.DataFrame
          The test set, with the categorical columns specified by the argument
          cat_columns replaced by the learned encodings from the training set.
          This is not returned if X_test is None.
    References
    ----------
    Slakey et al., "Encoding Categorical Variables with Conjugate Bayesian
    Models for WeWork Lead Scoring Engine", 2019.
    Examples
    -------
    >>> encodings = conjugate_encoder(
    ...     my_train,
    ...     my_train['y'],
    ...     cat_columns=['foo'],
    ...     prior_params={'alpha': 3, 'beta': 3},
    ...     X_test=my_test,
    ...     objective="binary")
    >>> train_new = encodings[0]
    """
    # check the input of objective function
    if objective not in ['regression', 'binary']:
        raise Exception("Objective must be either regression or binary.")
    # check if X_train contains cat_columns
    if not set(cat_columns).issubset(X_train.columns):
        raise Exception("X_train must contain cat_columns.")
    # check if input cat_columns is a list
    if not isinstance(cat_columns, list):
        raise Exception("Type of cat_columns must be a list.")
    # check if input X_train is a data frame
    if not isinstance(X_train, pd.DataFrame):
        raise Exception("Type of X_train must be pd.Dataframe.")
    # check if input y is a pandas series
    if not isinstance(y, pd.Series):
        raise Exception("Type of y must be pd.Series.")
    if X_test is not None:
        # check if X_test contains cat_columns
        if not set(cat_columns).issubset(X_test.columns):
            raise Exception("X_test must contain cat_columns.")
        # check if input X_test is a data frame
        if not isinstance(X_test, pd.DataFrame):
            raise Exception("X_test must be pd.Dataframe.")
    # for regression case, check if prior specification is valid
    if objective == "regression":
        if not {"mu", "alpha", "beta", "vega"}.issubset(prior_params.keys()):
            raise Exception(
                "Invalid prior specification. The dictionary must include "
                "four keys for regression."
                )
        if (prior_params['vega'] <= 0 or
                prior_params['beta'] <= 0 or
                prior_params['alpha'] <= 0):
            raise Exception("Invalid prior specification. vega, alpha and"
                            "beta should all be positive.")
        # set prior parameters
        mu = prior_params['mu']
        alpha = prior_params['alpha']
        vega = prior_params['vega']
        beta = prior_params['beta']
        n = X_train.shape[0]
        # encodings cannot be fit with a single data point
        if n == 1:
            raise Exception("Cannot fit encodings with only one data point.")
        train_processed = X_train.copy()
        if X_test is not None:
            test_processed = X_test.copy()
        for col in cat_columns:
            # calculate mean and variance from column y
            conditionals = train_processed.groupby(
                col)[y.name].aggregate(['mean', 'var'])
            conditionals.columns = ['encoded_mean', 'encoded_var']
            # check if there is NA in encoded variance
            if conditionals['encoded_var'].isnull().any():
                raise Exception(
                    "NA's fitted for expected variance. The variance of a "
                    "single data point does not exist. Make sure columns "
                    "specified are truly categorical.")
            # set posterior value for parameters
            mu_post = (vega * mu + n *
                       conditionals['encoded_mean']) / (vega + n)
            alpha_post = alpha + n / 2
            beta_post = beta + 0.5 * n * conditionals['encoded_var'] + (
                (n * vega) / (vega + n)) * (((conditionals['encoded_mean'] -
                                              mu)**2) / 2)
            # encode the variables
            all_encodings = pd.concat(
                [mu_post, beta_post / (alpha_post - 1)], axis=1).reset_index()
            all_encodings.columns = [
                col,
                'encoded_mean' + "_" + col,
                'encoded_var' + "_" + col]
            # merge the encoded value to new training dataset
            train_processed = train_processed.merge(
                all_encodings, on=col, how="left")
            if X_test is not None:
                # merge the encoded value to new testing dataset
                test_processed = test_processed.merge(
                    all_encodings, on=col, how="left")
                test_processed['encoded_mean' + "_" + col] = test_processed[
                    'encoded_mean' + "_" + col].fillna(mu)
                # check if alpha parameter valid
                if alpha == 1:
                    raise Exception(
                            "Cannot fill missing values in test if alpha is "
                            "1.")
                # calculate prior of variance
                prior_var = beta / (alpha - 1)
                # encode the values exists only in test dataset
                test_processed['encoded_var' +
                               "_" +
                               col] = test_processed['encoded_var' +
                                                     "_" +
                                                     col].fillna(prior_var)
                # drop orignial columns
                test_processed = test_processed.drop(columns=col, axis=1)
            train_processed = train_processed.drop(columns=col, axis=1)
    # for binary case
    else:
        # check if the prior specification are valid
        if not {"alpha", "beta"}.issubset(prior_params.keys()):
            raise Exception(
                "Invalid prior specification. The dictionary must include "
                "keys alpha, beta for binary classification.")
        if prior_params['alpha'] <= 0 or prior_params['beta'] <= 0:
            raise Exception(
                "Invalid prior specification. alpha and beta should all be"
                "positive.")
        # check if the response variable is binary
        if len(set(y)) != 2:
            raise Exception(
                "Binary classification can only have two unique values.")
        y = y.copy()
        if y.dtype == "object":
            # re-encode the labels as 0/1 while preserving the Series name/index
            y = pd.Series(np.where(y == y.unique()[0], 0, 1),
                          index=y.index, name=y.name)
        # set prior parameters
        alpha = prior_params['alpha']
        beta = prior_params['beta']
        n = X_train.shape[0]
        train_processed = X_train.copy()
        if X_test is not None:
            test_processed = X_test.copy()
        for col in cat_columns:
            # calculate the sum from column y
            conditionals = y.groupby(train_processed[col]).aggregate(['sum'])
            conditionals.columns = ['encoded_sum']
            # set posterior value for parameters
            alpha_post = alpha + conditionals['encoded_sum']
            beta_post = beta + n - conditionals['encoded_sum']
            posterior_mean = (alpha_post / (alpha_post + beta_post)).to_dict()
            # map the encoded values from training dataset
            train_processed.loc[:, col] = train_processed[col].map(
                posterior_mean)
            # map the encoded values from testing dataset
            if X_test is not None:
                prior_mean = alpha / (alpha + beta)
                test_processed.loc[:, col] = test_processed[col].map(
                    posterior_mean)
                test_processed.loc[:, col] = test_processed[col].fillna(
                    prior_mean)
    return [
        train_processed,
        test_processed] if X_test is not None else [train_processed] | 
	def conjugate_encoder(
        X_train,
        y,
        cat_columns,
        prior_params,
        X_test=None,
        objective="regression"):
    """This function encodes categorical variables by fitting a posterior
    distribution per each category to the target variable y, using a known
    conjugate-prior. The resulting mean(s) of each posterior distribution per
    each category are used as the encodings.
    Parameters
    ----------
    X_train : pd.DataFrame
          A pandas dataframe representing the training data set containing
          some categorical features/columns.
    y : pd.Series
          A pandas series representing the target variable. If the objective
          is "binary", then this series should only contain two unique
          values.
    cat_columns : list
          The names of the categorical features in X_train and/or X_test.
    prior_params: dict
          A dictionary of parameters for each prior distribution assumed.
          For regression, this requires a dictionary with four keys and four
          values: mu, vega, alpha, beta. All must be real numbers, and must
          be greater than 0 except for mu, which can be negative. A value of
          alpha > 1 is strongly advised. For binary classification,
          this requires a dictionary with two keys and two
          values: alpha, beta. All must be real numbers and be greater than 0.
    X_test : pd.DataFrame
          A pandas dataframe representing the test set, containing some set
          of categorical features/columns. This is an optional argument.
    objective : str
          A string, either "regression" or "binary" specifying the problem.
          Default is regression. For regression, a normal-inverse gamma
          prior + normal likelihood is assumed. For binary classification, a
          beta prior with binomial likelihood is assumed.
    Returns
    -------
    train_processed : pd.DataFrame
          The training set, with the categorical columns specified by the
          argument cat_columns replaced by their encodings. For regression,
          the encodings will return 2 columns, since the normal-inverse gamma
          distribution is two dimensional. For binary classification, the
          encodings will return 1 column.
    test_processed : pd.DataFrame
          The test set, with the categorical columns specified by the argument
          cat_columns replaced by the learned encodings from the training set.
          This is not returned if X_test is None.
    References
    ----------
    Slakey et al., "Encoding Categorical Variables with Conjugate Bayesian
    Models for WeWork Lead Scoring Engine", 2019.
    Examples
    -------
    >>> encodings = conjugate_encoder(
    my_train,
    my_train['y'],
    cat_columns = ['foo'],
    prior_params = {'alpha': 3, 'beta': 3},
    X_test = my_test,
    objective = "binary")
    >>> train_new = encodings[0]
    """
    # check the input of objective function
    if objective not in ['regression', 'binary']:
        raise Exception("Objective must be either regression or binary.")
    # check if X_train contains cat_columns
    if (set(cat_columns).issubset(X_train.columns)) is False:
        raise Exception("X_train must contain cat_columns.")
    # check if input cat_columns is a list
    if isinstance(cat_columns, list) is False:
        raise Exception("Type of cat_columns must be a list.")
    # check if input X_train is a data frame
    if (isinstance(X_train, pd.DataFrame)) is False:
        raise Exception("Type of X_train must be pd.Dataframe.")
    # check if input y is a pandas series
    if isinstance(y, pd.Series) is False:
        raise Exception("Type of y must be pd.Series.")
    if X_test is not None:
        # check if X_test contains cat_columns
        if (set(cat_columns).issubset(X_test.columns)) is False:
            raise Exception("X_test must contain cat_columns.")
        # check if input X_test is a data frame
        if (isinstance(X_test, pd.DataFrame)) is False:
            raise Exception("X_test must be pd.Dataframe.")
    # for regression case, check if prior specification is valid
    if objective == "regression":
        if not {"mu", "vega", "alpha", "beta"}.issubset(
                prior_params.keys()):
            raise Exception(
                "Invalid prior specification. The dictionary must include "
                "the keys mu, vega, alpha and beta for regression.")
        if (prior_params['vega'] <= 0 or
                prior_params['beta'] <= 0 or
                prior_params['alpha'] <= 0):
            raise Exception("Invalid prior specification. vega, alpha and"
                            "beta should all be positive.")
        # set prior parameters
        mu = prior_params['mu']
        alpha = prior_params['alpha']
        vega = prior_params['vega']
        beta = prior_params['beta']
        n = X_train.shape[0]
        # encodings cannot be fit from a single training observation
        if n == 1:
            raise Exception("Cannot fit encodings with only one data point.")
        train_processed = X_train.copy()
        if X_test is not None:
            test_processed = X_test.copy()
        for col in cat_columns:
            # calculate mean and variance from column y
            conditionals = y.groupby(
                train_processed[col]).aggregate(['mean', 'var'])
            conditionals.columns = ['encoded_mean', 'encoded_var']
            # check if there is NA in encoded variance
            if conditionals['encoded_var'].isnull().any():
                raise Exception(
                    "NA's fitted for expected variance. The variance of a "
                    "single data point does not exist. Make sure columns "
                    "specified are truly categorical.")
            # set posterior value for parameters
            mu_post = (vega * mu + n *
                       conditionals['encoded_mean']) / (vega + n)
            alpha_post = alpha + n / 2
            beta_post = beta + 0.5 * n * conditionals['encoded_var'] + (
                (n * vega) / (vega + n)) * (((conditionals['encoded_mean'] -
                                              mu)**2) / 2)
            # encode the variables
            all_encodings = pd.concat(
                [mu_post, beta_post / (alpha_post - 1)], axis=1).reset_index()
            all_encodings.columns = [
                col,
                'encoded_mean' + "_" + col,
                'encoded_var' + "_" + col]
            # merge the encoded value to new training dataset
            train_processed = train_processed.merge(
                all_encodings, on=col, how="left")
            if X_test is not None:
                # merge the encoded value to new testing dataset
                test_processed = test_processed.merge(
                    all_encodings, on=col, how="left")
                test_processed['encoded_mean' + "_" + col] = test_processed[
                    'encoded_mean' + "_" + col].fillna(mu)
                # the prior variance beta / (alpha - 1) requires alpha > 1
                if alpha <= 1:
                    raise Exception(
                        "Cannot fill missing values in the test set unless "
                        "alpha is greater than 1.")
                # calculate prior of variance
                prior_var = beta / (alpha - 1)
                # encode categories that appear only in the test dataset with
                # the prior variance
                test_processed['encoded_var' + "_" + col] = test_processed[
                    'encoded_var' + "_" + col].fillna(prior_var)
                # drop the original column
                test_processed = test_processed.drop(columns=col)
            train_processed = train_processed.drop(columns=col)
    # for binary case
    else:
        # check if the prior specification is valid
        if not {"alpha", "beta"}.issubset(prior_params.keys()):
            raise Exception(
                "Invalid prior specification. The dictionary must include "
                "keys alpha, beta for binary classification.")
        if prior_params['alpha'] <= 0 or prior_params['beta'] <= 0:
            raise Exception(
                "Invalid prior specification. alpha and beta should both be "
                "positive.")
        # check if the response variable is binary
        if len(set(y)) != 2:
            raise Exception(
                "The target variable must contain exactly two unique values "
                "for binary classification.")
        y = y.copy()
        if y.dtype == "object":
            # recode the two classes as 0/1, keeping the index so the target
            # can be grouped against the training frame below
            y = pd.Series(
                np.where(y == y.unique()[0], 0, 1), index=y.index, name=y.name)
        # set prior parameters
        alpha = prior_params['alpha']
        beta = prior_params['beta']
        n = X_train.shape[0]
        train_processed = X_train.copy()
        if X_test is not None:
            test_processed = X_test.copy()
        for col in cat_columns:
            # calculate the sum from column y
            conditionals = y.groupby(train_processed[col]).aggregate(['sum'])
            conditionals.columns = ['encoded_sum']
            # set posterior value for parameters
            alpha_post = alpha + conditionals['encoded_sum']
            beta_post = beta + n - conditionals['encoded_sum']
            posterior_mean = (alpha_post / (alpha_post + beta_post)).to_dict()
            # map the encoded values from training dataset
            train_processed.loc[:, col] = train_processed[col].map(
                posterior_mean)
            # map the encoded values from testing dataset
            if X_test is not None:
                prior_mean = alpha / (alpha + beta)
                test_processed.loc[:, col] = test_processed[col].map(
                    posterior_mean)
                test_processed.loc[:, col] = test_processed[col].fillna(
                    prior_mean)
    return [
        train_processed,
        test_processed] if X_test is not None else [train_processed] | 
| 
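A minimal usage sketch for conjugate_encoder on made-up data (the frame, column names, and prior values below are illustrative only, and the function is assumed to be in scope). It shows the regression objective, where every categorical column is replaced by two columns: the posterior mean and the posterior expected variance of the target for that category.

# Hypothetical example: toy data and prior values chosen for illustration only.
import pandas as pd

toy_train = pd.DataFrame({
    "city": ["a", "a", "b", "b", "b"],
    "y": [1.0, 2.0, 3.0, 4.0, 5.0],
})
toy_test = pd.DataFrame({"city": ["a", "b", "c"]})  # "c" never seen in training

train_enc, test_enc = conjugate_encoder(
    X_train=toy_train,
    y=toy_train["y"],
    cat_columns=["city"],
    prior_params={"mu": 0.0, "vega": 1.0, "alpha": 2.0, "beta": 1.0},
    X_test=toy_test,
    objective="regression",
)
# "city" is replaced by encoded_mean_city and encoded_var_city; the unseen
# category "c" in the test set falls back to the prior mean mu and to the
# prior expected variance beta / (alpha - 1).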
	Python | 
	def frequency_encoder(X_train, cat_columns, X_test=None, prior=0.5):
    """This function encodes categorical variables using the frequencies
    of each category.
    Parameters
    ----------
    X_train : pd.DataFrame
          A pandas dataframe representing the training data set
          containing some categorical features/columns.
    X_test : pd.DataFrame
          A pandas dataframe representing the test set, containing some set
          of categorical features/columns. This is an optional argument.
    cat_columns : list
          The names of the categorical features in X_train and/or X_test.
    prior : float
          A number in [0, inf] that acts as pseudo counts when calculating
          the encodings. Useful for preventing encodings of 0 for when the
          training set does not have particular categories observed in the
          test set. A larger value gives less weight to what is observed in
          the training set. A value of 0 incorporates no prior information.
          The default value is 0.5.
    Returns
    -------
    train_processed : pd.DataFrame
          The training set, with the categorical columns specified by the
          argument cat_columns replaced by their encodings.
    test_processed : pd.DataFrame
          The test set, with the categorical columns specified by the argument
          cat_columns replaced by the learned encodings from the training set.
          This is not returned if X_test is None.
    Examples
    -------
    >>> encodings = frequency_encoder(
    my_train,
    cat_columns = ['foo'],
    X_test = my_test)
    >>> train_new = encodings[0]
    """
    includes_X_test = (X_test is not None)
    if includes_X_test:
        train_processed = X_train.copy()
        test_processed = X_test.copy()
        for col in cat_columns:
            encoding_col = pd.DataFrame(
                X_train[col].value_counts(
                    normalize=True)).reset_index()
            encoding_col = encoding_col.rename(
                columns={col: 'freq', 'index': col})
            # encode train data
            encoded_train_col = pd.merge(
                X_train,
                encoding_col,
                on=[col],
                how='left').set_index([X_train.index])[['freq']]
            train_processed[col] = encoded_train_col['freq']
            # encode test data
            encoded_test_col = pd.merge(
                X_test,
                encoding_col,
                on=[col],
                how='left').set_index([X_test.index])[['freq']]
            # If a category exists in the test data that was never seen in
            # the train data, set its frequency to 0
            encoded_test_col = encoded_test_col.fillna(0)
            test_processed[col] = encoded_test_col['freq']
        return [train_processed, test_processed]
    else:
        train_processed = X_train.copy()
        for col in cat_columns:
            encoding_col = pd.DataFrame(
                X_train[col].value_counts(
                    normalize=True)).reset_index()
            encoding_col = encoding_col.rename(
                columns={col: 'freq', 'index': col})
            # encode train data
            encoded_train_col = pd.merge(
                X_train,
                encoding_col,
                on=[col],
                how='left').set_index([X_train.index])[['freq']]
            train_processed[col] = encoded_train_col['freq']
        return train_processed | 
	def frequency_encoder(X_train, cat_columns, X_test=None, prior=0.5):
    """This function encodes categorical variables using the frequencies
    of each category.
    Parameters
    ----------
    X_train : pd.DataFrame
          A pandas dataframe representing the training data set
          containing some categorical features/columns.
    X_test : pd.DataFrame
          A pandas dataframe representing the test set, containing some set
          of categorical features/columns. This is an optional argument.
    cat_columns : list
          The names of the categorical features in X_train and/or X_test.
    prior : float
          A number in [0, inf] that acts as pseudo counts when calculating
          the encodings. Useful for preventing encodings of 0 for when the
          training set does not have particular categories observed in the
          test set. A larger value gives less weight to what is observed in
          the training set. A value of 0 incorporates no prior information.
          The default value is 0.5.
    Returns
    -------
    train_processed : pd.DataFrame
          The training set, with the categorical columns specified by the
          argument cat_columns replaced by their encodings.
    test_processed : pd.DataFrame
          The test set, with the categorical columns specified by the argument
          cat_columns replaced by the learned encodings from the training set.
          This is not returned if X_test is None.
    Examples
    -------
    >>> encodings = frequency_encoder(
    my_train,
    cat_columns = ['foo'],
    X_test = my_test)
    >>> train_new = encodings[0]
    """
    includes_X_test = (X_test is not None)
    if includes_X_test:
        train_processed = X_train.copy()
        test_processed = X_test.copy()
        for col in cat_columns:
            encoding_col = pd.DataFrame(
                X_train[col].value_counts(
                    normalize=True)).reset_index()
            encoding_col = encoding_col.rename(
                columns={col: 'freq', 'index': col})
            # encode train data
            encoded_train_col = pd.merge(
                X_train,
                encoding_col,
                on=[col],
                how='left').set_index([X_train.index])[['freq']]
            train_processed[col] = encoded_train_col['freq']
            # encode test data
            encoded_test_col = pd.merge(
                X_test,
                encoding_col,
                on=[col],
                how='left').set_index([X_test.index])[['freq']]
            # If a category exists in the test data that was never seen in
            # the train data, set its frequency to 0
            encoded_test_col = encoded_test_col.fillna(0)
            test_processed[col] = encoded_test_col['freq']
        return [train_processed, test_processed]
    else:
        train_processed = X_train.copy()
        for col in cat_columns:
            encoding_col = pd.DataFrame(
                X_train[col].value_counts(
                    normalize=True)).reset_index()
            encoding_col = encoding_col.rename(
                columns={col: 'freq', 'index': col})
            # encode train data
            encoded_train_col = pd.merge(
                X_train,
                encoding_col,
                on=[col],
                how='left').set_index([X_train.index])[['freq']]
            train_processed[col] = encoded_train_col['freq']
        return train_processed | 
| 
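A small illustrative call for frequency_encoder (toy data; the function is assumed to be in scope). Each categorical column is replaced by the category's relative frequency in the training set, and a test category never observed in training is encoded as 0.

# Hypothetical example: toy data for illustration only.
import pandas as pd

toy_train = pd.DataFrame({"colour": ["red", "red", "blue", "green"]})
toy_test = pd.DataFrame({"colour": ["red", "yellow"]})  # "yellow" is unseen

train_enc, test_enc = frequency_encoder(
    X_train=toy_train,
    cat_columns=["colour"],
    X_test=toy_test,
)
# Under the pandas 1.x value_counts()/reset_index() layout this code assumes:
# train_enc["colour"] -> 0.50, 0.50, 0.25, 0.25
# test_enc["colour"]  -> 0.50, 0.00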
	Python | 
	def target_encoder(
        X_train,
        y,
        cat_columns,
        X_test=None,
        prior=0.5,
        objective='regression'):
    """
    This function encodes categorical variables with average target values for
    each category.
    Parameters
    ----------
    X_train : pd.DataFrame
          A pandas dataframe representing the training data set containing
          some categorical features/columns.
    y : pd.Series
          A pandas series representing the target variable. If the objective
          is "binary", then this series should only contain two unique
          values.
    cat_columns : list
          The names of the categorical features in X_train and/or X_test.
    prior : float
          A number in [0, inf] that acts as pseudo counts when calculating
          the encodings. Useful for preventing encodings of 0 for when the
          training set does not have particular categories observed in the
          test set. A larger value gives less weight to what is observed in
          the training set. A value of 0 incorporates no prior information.
          The default value is 0.5.
    X_test : pd.DataFrame
          A pandas dataframe representing the test set, containing some set
          of categorical features/columns. This is an optional argument.
    objective : string
          A string, either "regression" or "binary" specifying the problem.
          Default is regression.
    Returns
    -------
    train_processed : pd.DataFrame
          The training set, with the categorical columns specified by the
          argument cat_columns replaced by their encodings.
    test_processed : pd.DataFrame
          The test set, with the categorical columns specified by the argument
          cat_columns replaced by the learned encodings from the training set.
          This is not returned if X_test is None.
    Examples
    -------
    >>> encodings = target_encoder(
    my_train,
    my_train['y'],
    cat_columns = ['foo'],
    prior = 0.5,
    X_test = my_test,
    objective = 'regression')
    >>> train_new = encodings[0]
    """
    # check input of objective
    if (objective != 'regression') & (objective != 'binary'):
        raise Exception("objective must be regression or binary.")
    # check if cat_columns is a list
    if isinstance(cat_columns, list) is False:
        raise Exception("Type of cat_columns must be a list")
    # check if prior is a numeric value
    if (isinstance(prior, float) | isinstance(prior, int)) is False:
        raise Exception("Type of prior must be a numeric value")
    # check if y is a pandas series
    if isinstance(y, pd.Series) is False:
        raise Exception("Type of y must be pd.Series")
    # check if length y equals to length X_train
    if len(y) != len(X_train):
        raise Exception("Input y must equal to X_train")
    # check if X_train is pandas dataframe
    if isinstance(X_train, pd.DataFrame) is False:
        raise Exception("Type of X_train must be pd.Dataframe")
    # check if X_train contains cat_columns
    if set(cat_columns).issubset(X_train.columns) is False:
        raise Exception("X_train must contain cat_columns")
    y_new = y.copy()
    # check if target variable is numeric for regression objective
    if objective == 'regression':
        if (y.dtype != 'int64') & (y.dtype != 'float64'):
            raise Exception("The target variable must be numeric.")
        # for binary objective
    else:
        # check if target is binary
        if y.nunique() != 2:
            raise Exception("The target variable must be binary")
        # encode target to 0 or 1
        if (y.dtype != 'int64') & (y.dtype != 'float64'):
            y_new = y.replace({y.unique()[0]: 0, y.unique()[1]: 1})
    # Check when X_test is none
    if X_test is None:
        train_processed = X_train.copy()
        for col in cat_columns:
            global_mean = np.nanmean(y_new)
            # calculate target counts for each category and save to dictionary
            search_table = y_new.groupby(
                train_processed[col]).aggregate(['sum', 'count'])
            search_table['encodings'] = ((
                search_table['sum'] +
                prior * global_mean) / (search_table['count'] + prior))
            search_table = search_table.drop(
                columns=['sum', 'count'], axis=1).to_dict()['encodings']
            # encode categorical columns for training dataset
            train_processed.loc[:,
                                col] = train_processed[col].map(search_table)
        return [train_processed]
    # Check when X_test is not none
    else:
        # check if X_test is pandas dataframe
        if isinstance(X_test, pd.DataFrame) is False:
            raise Exception("Type of X_test must be pd.Dataframe")
        # check if X_test contains cat_columns
        if set(cat_columns).issubset(X_test.columns) is False:
            raise Exception("X_test must contain cat_columns")
        train_processed = X_train.copy()
        test_processed = X_test.copy()
        for col in cat_columns:
            global_mean = np.nanmean(y_new)
            # calculate target counts for each category and save to dictionary
            search_table = y_new.groupby(
                train_processed[col]).aggregate(['sum', 'count'])
            search_table['encodings'] = ((
                search_table['sum'] +
                prior * global_mean) / (search_table['count'] + prior))
            search_table = search_table.drop(
                columns=['sum', 'count'], axis=1).to_dict()['encodings']
            # encode categorical columns for training dataset
            train_processed.loc[:,
                                col] = train_processed[col].map(search_table)
            # encode categorical columns for testing dataset
            test_processed.loc[:, col] = test_processed[col].map(search_table)
            test_processed.loc[:,
                               col] = test_processed[col].fillna(global_mean)
        return [train_processed, test_processed] | 
	def target_encoder(
        X_train,
        y,
        cat_columns,
        X_test=None,
        prior=0.5,
        objective='regression'):
    """
    This function encodes categorical variables with average target values for
    each category.
    Parameters
    ----------
    X_train : pd.DataFrame
          A pandas dataframe representing the training data set containing
          some categorical features/columns.
    y : pd.Series
          A pandas series representing the target variable. If the objective
          is "binary", then this series should only contain two unique
          values.
    cat_columns : list
          The names of the categorical features in X_train and/or X_test.
    prior : float
          A number in [0, inf] that acts as pseudo counts when calculating
          the encodings. Useful for preventing encodings of 0 for when the
          training set does not have particular categories observed in the
          test set. A larger value gives less weight to what is observed in
          the training set. A value of 0 incorporates no prior information.
          The default value is 0.5.
    X_test : pd.DataFrame
          A pandas dataframe representing the test set, containing some set
          of categorical features/columns. This is an optional argument.
    objective : string
          A string, either "regression" or "binary" specifying the problem.
          Default is regression.
    Returns
    -------
    train_processed : pd.DataFrame
          The training set, with the categorical columns specified by the
          argument cat_columns replaced by their encodings.
    test_processed : pd.DataFrame
          The test set, with the categorical columns specified by the argument
          cat_columns replaced by the learned encodings from the training set.
          This is not returned if X_test is None.
    Examples
    -------
    >>> encodings = target_encoder(
    my_train,
    my_train['y'],
    cat_columns = ['foo'],
    prior = 0.5,
    X_test = my_test,
    objective = 'regression')
    >>> train_new = encodings[0]
    """
    # check input of objective
    if (objective != 'regression') & (objective != 'binary'):
        raise Exception("objective must be regression or binary.")
    # check if cat_columns is a list
    if isinstance(cat_columns, list) is False:
        raise Exception("Type of cat_columns must be a list")
    # check if prior is a numeric value
    if (isinstance(prior, float) | isinstance(prior, int)) is False:
        raise Exception("Type of prior must be a numeric value")
    # check if y is a pandas series
    if isinstance(y, pd.Series) is False:
        raise Exception("Type of y must be pd.Series")
    # check if length y equals to length X_train
    if len(y) != len(X_train):
        raise Exception("Input y must equal to X_train")
    # check if X_train is pandas dataframe
    if isinstance(X_train, pd.DataFrame) is False:
        raise Exception("Type of X_train must be pd.Dataframe")
    # check if X_train contains cat_columns
    if set(cat_columns).issubset(X_train.columns) is False:
        raise Exception("X_train must contain cat_columns")
    y_new = y.copy()
    # check if target variable is numeric for regression objective
    if objective == 'regression':
        if (y.dtype != 'int64') & (y.dtype != 'float64'):
            raise Exception("The target variable must be numeric.")
        # for binary objective
    else:
        # check if target is binary
        if y.nunique() != 2:
            raise Exception("The target variable must be binary")
        # encode target to 0 or 1
        if (y.dtype != 'int64') & (y.dtype != 'float64'):
            y_new = y.replace({y.unique()[0]: 0, y.unique()[1]: 1})
    # Check when X_test is none
    if X_test is None:
        train_processed = X_train.copy()
        for col in cat_columns:
            global_mean = np.nanmean(y_new)
            # calculate target counts for each category and save to dictionary
            search_table = y_new.groupby(
                train_processed[col]).aggregate(['sum', 'count'])
            search_table['encodings'] = ((
                search_table['sum'] +
                prior * global_mean) / (search_table['count'] + prior))
            search_table = search_table.drop(
                columns=['sum', 'count'], axis=1).to_dict()['encodings']
            # encode categorical columns for training dataset
            train_processed.loc[:,
                                col] = train_processed[col].map(search_table)
        return [train_processed]
    # Check when X_test is not none
    else:
        # check if X_test is pandas dataframe
        if isinstance(X_test, pd.DataFrame) is False:
            raise Exception("Type of X_test must be pd.Dataframe")
        # check if X_test contains cat_columns
        if set(cat_columns).issubset(X_test.columns) is False:
            raise Exception("X_test must contain cat_columns")
        train_processed = X_train.copy()
        test_processed = X_test.copy()
        for col in cat_columns:
            global_mean = np.nanmean(y_new)
            # calculate target counts for each category and save to dictionary
            search_table = y_new.groupby(
                train_processed[col]).aggregate(['sum', 'count'])
            search_table['encodings'] = ((
                search_table['sum'] +
                prior * global_mean) / (search_table['count'] + prior))
            search_table = search_table.drop(
                columns=['sum', 'count'], axis=1).to_dict()['encodings']
            # encode categorical columns for training dataset
            train_processed.loc[:,
                                col] = train_processed[col].map(search_table)
            # encode categorical columns for testing dataset
            test_processed.loc[:, col] = test_processed[col].map(search_table)
            test_processed.loc[:,
                               col] = test_processed[col].fillna(global_mean)
        return [train_processed, test_processed] | 
| 
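A worked toy example for target_encoder (made-up data; the function is assumed to be in scope), showing how the prior pseudo-count shrinks each category mean towards the global target mean and how an unseen test category falls back to that global mean.

# Hypothetical example: toy data for illustration only.
import pandas as pd

toy_train = pd.DataFrame({
    "shop": ["a", "a", "b", "b"],
    "y": [10.0, 20.0, 30.0, 40.0],
})
toy_test = pd.DataFrame({"shop": ["a", "c"]})  # "c" never seen in training

train_enc, test_enc = target_encoder(
    X_train=toy_train,
    y=toy_train["y"],
    cat_columns=["shop"],
    X_test=toy_test,
    prior=0.5,
    objective="regression",
)
# The global mean of y is 25. Shop "a" is encoded as
# (10 + 20 + 0.5 * 25) / (2 + 0.5) = 17.0, shop "b" as
# (30 + 40 + 0.5 * 25) / (2 + 0.5) = 33.0, and the unseen "c" in the test set
# is encoded as the global mean, 25.0.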
	Python | 
	async def ping(self, ctx):
        """ Shows latency and API response times.
        usage:: ping
        details:: This command is a response test; it helps gauge whether there is any latency (lag) in either the
        bot's connection or the API.
        """
        # retrieve guild config
        guild_config = next(cfg for cfg in self.bot.db.configs if cfg["guild_id"] == ctx.guild.id)
        # send 1st message as starting time
        embed = self._create_embed(guild_config["language"].cmdPing_ping)
        msg = await ctx.send(embed=embed)
        # send 2nd message as ending time
        embed = self._create_embed(guild_config["language"].cmdPing_pong)
        await msg.edit(embed=embed)
        # send 3rd message for display roundtrip time
        embed = self._create_embed(guild_config["language"].cmdPing_roundtrip.format(self._get_roundtrip(msg),
                                                                                     round(self.bot.latency, 2)))
        await msg.edit(embed=embed) | 
	async def ping(self, ctx):
        """ Shows latency and API response times.
        usage:: ping
        details:: This command is a response test; it helps gauge whether there is any latency (lag) in either the
        bot's connection or the API.
        """
        # retrieve guild config
        guild_config = next(cfg for cfg in self.bot.db.configs if cfg["guild_id"] == ctx.guild.id)
        # send 1st message as starting time
        embed = self._create_embed(guild_config["language"].cmdPing_ping)
        msg = await ctx.send(embed=embed)
        # send 2nd message as ending time
        embed = self._create_embed(guild_config["language"].cmdPing_pong)
        await msg.edit(embed=embed)
        # send 3rd message for display roundtrip time
        embed = self._create_embed(guild_config["language"].cmdPing_roundtrip.format(self._get_roundtrip(msg),
                                                                                     round(self.bot.latency, 2)))
        await msg.edit(embed=embed) | 
| 
	Python | 
	async def server_info(self, ctx):
        """ Displays server information & statistics.
        usage:: server_info
        details:: This command will return an organised embed with server information and statistics.
        """
        # retrieve guild config
        guild_config = next(cfg for cfg in self.bot.db.configs if cfg["guild_id"] == ctx.guild.id)
        # Prepare embed
        desc = guild_config["language"].cmdServerInfo_desc.format(ctx.guild.owner.display_name, ctx.guild.owner.id)
        time = datetime.datetime.now(get_localzone())
        created_at = ctx.guild.created_at.strftime(guild_config["language"].dateTimeFormat)
        bot_user = [m for m in ctx.guild.members if m.bot]
        true_member_count = ctx.guild.member_count - len(bot_user)
        member_count = '{0} + {1} bots'.format(true_member_count, len(bot_user))
        embed = discord.Embed(colour=3447003, description=desc, timestamp=time)
        embed.add_field(name=guild_config["language"].cmdServerInfo_memCount, value=member_count)
        embed.add_field(name=guild_config["language"].cmdServerInfo_location, value=ctx.guild.region)
        embed.add_field(name=guild_config["language"].cmdServerInfo_created, value=created_at)
        embed.add_field(name=guild_config["language"].cmdServerInfo_roles, value=str(len(ctx.guild.roles)))
        embed.add_field(name="\u200b", value="\u200b")
        embed.set_footer(text=self.bot.user.display_name, icon_url=self.bot.user.avatar_url)
        embed.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)
        # Send message
        await ctx.send(embed=embed) | 
	async def server_info(self, ctx):
        """ Displays server information & statistics.
        usage:: server_info
        details:: This command will return an organised embed with server information and statistics.
        """
        # retrieve guild config
        guild_config = next(cfg for cfg in self.bot.db.configs if cfg["guild_id"] == ctx.guild.id)
        # Prepare embed
        desc = guild_config["language"].cmdServerInfo_desc.format(ctx.guild.owner.display_name, ctx.guild.owner.id)
        time = datetime.datetime.now(get_localzone())
        created_at = ctx.guild.created_at.strftime(guild_config["language"].dateTimeFormat)
        bot_user = [m for m in ctx.guild.members if m.bot]
        true_member_count = ctx.guild.member_count - len(bot_user)
        member_count = '{0} + {1} bots'.format(true_member_count, len(bot_user))
        embed = discord.Embed(colour=3447003, description=desc, timestamp=time)
        embed.add_field(name=guild_config["language"].cmdServerInfo_memCount, value=member_count)
        embed.add_field(name=guild_config["language"].cmdServerInfo_location, value=ctx.guild.region)
        embed.add_field(name=guild_config["language"].cmdServerInfo_created, value=created_at)
        embed.add_field(name=guild_config["language"].cmdServerInfo_roles, value=str(len(ctx.guild.roles)))
        embed.add_field(name="\u200b", value="\u200b")
        embed.set_footer(text=self.bot.user.display_name, icon_url=self.bot.user.avatar_url)
        embed.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)
        # Send message
        await ctx.send(embed=embed) | 
| 
	Python | 
	async def stats(self, ctx):
        """ Displays bot information & statistics.
        usage:: botInfo
        details:: This command will return an organised embed with bot information and statistics.
        """
        # retrieve guild config
        guild_config = next(cfg for cfg in self.bot.db.configs if cfg["guild_id"] == ctx.guild.id)
        # Prepare embed
        time = datetime.datetime.now(get_localzone())
        uptime = self._get_readable_time(self.startTime, time)
        chan_count = 0
        online_mem_count = 0
        total_mem_count = 0
        for guild in self.bot.guilds:
            chan_count += len([chan for chan in guild.channels if isinstance(chan.category, discord.CategoryChannel)])
            total_mem_count += guild.member_count
            online_mem_count += len([mem for mem in guild.members if mem.status == discord.Status.online])
        embed = discord.Embed(colour=3447003, title=guild_config["language"].cmdBotInfo_title,
                              description=guild_config["language"].cmdBotInfo_desc,
                              timestamp=time)
        embed.add_field(name=guild_config["language"].cmdBotInfo_status,
                        value=guild_config["language"].cmdBotInfo_statusVal)
        embed.add_field(name=guild_config["language"].cmdBotInfo_uptime, value=uptime)
        embed.add_field(name=guild_config["language"].cmdBotInfo_latency,
                        value="{0} ms".format(round(self.bot.latency, 2)))
        embed.add_field(name=guild_config["language"].cmdBotInfo_guilds, value="{0}".format(len(self.bot.guilds)))
        embed.add_field(name=guild_config["language"].cmdBotInfo_members,
                        value=guild_config["language"].cmdBotInfo_membersVal.format(online_mem_count, total_mem_count))
        embed.add_field(name=guild_config["language"].cmdBotInfo_channels, value="{0}".format(chan_count))
        embed.add_field(name=guild_config["language"].cmdBotInfo_ram,
                        value="{:.2f} MiB".format(self.process.memory_full_info().uss / 2 ** 20))
        embed.add_field(name=guild_config["language"].cmdBotInfo_cpu,
                        value="{:.2f}% CPU".format(self.process.cpu_percent()))
        embed.add_field(name=guild_config["language"].cmdBotInfo_lib, value="discord.py (rewrite)")
        embed.set_footer(text="Bot ID: {0}".format(self.bot.user.id))
        await ctx.send(embed=embed) | 
	async def stats(self, ctx):
        """ Displays bot information & statistics.
        usage:: botInfo
        details:: This command will return an organised embed with bot information and statistics.
        """
        # retrieve guild config
        guild_config = next(cfg for cfg in self.bot.db.configs if cfg["guild_id"] == ctx.guild.id)
        # Prepare embed
        time = datetime.datetime.now(get_localzone())
        uptime = self._get_readable_time(self.startTime, time)
        chan_count = 0
        online_mem_count = 0
        total_mem_count = 0
        for guild in self.bot.guilds:
            chan_count += len([chan for chan in guild.channels if isinstance(chan.category, discord.CategoryChannel)])
            total_mem_count += guild.member_count
            online_mem_count += len([mem for mem in guild.members if mem.status == discord.Status.online])
        embed = discord.Embed(colour=3447003, title=guild_config["language"].cmdBotInfo_title,
                              description=guild_config["language"].cmdBotInfo_desc,
                              timestamp=time)
        embed.add_field(name=guild_config["language"].cmdBotInfo_status,
                        value=guild_config["language"].cmdBotInfo_statusVal)
        embed.add_field(name=guild_config["language"].cmdBotInfo_uptime, value=uptime)
        embed.add_field(name=guild_config["language"].cmdBotInfo_latency,
                        value="{0} ms".format(round(self.bot.latency, 2)))
        embed.add_field(name=guild_config["language"].cmdBotInfo_guilds, value="{0}".format(len(self.bot.guilds)))
        embed.add_field(name=guild_config["language"].cmdBotInfo_members,
                        value=guild_config["language"].cmdBotInfo_membersVal.format(online_mem_count, total_mem_count))
        embed.add_field(name=guild_config["language"].cmdBotInfo_channels, value="{0}".format(chan_count))
        embed.add_field(name=guild_config["language"].cmdBotInfo_ram,
                        value="{:.2f} MiB".format(self.process.memory_full_info().uss / 2 ** 20))
        embed.add_field(name=guild_config["language"].cmdBotInfo_cpu,
                        value="{:.2f}% CPU".format(self.process.cpu_percent()))
        embed.add_field(name=guild_config["language"].cmdBotInfo_lib, value="discord.py (rewrite)")
        embed.set_footer(text="Bot ID: {0}".format(self.bot.user.id))
        await ctx.send(embed=embed) | 
| 
	Python | 
	async def user_info(self, ctx, *, member: discord.Member=None):
        """ Get detailed info for a nominated user.
        usage:: user [@mention|userid]
        details:: This command will get information on either a nominated user, or yourself.
        """
        # retrieve guild config
        guild_config = next(cfg for cfg in self.bot.db.configs if cfg["guild_id"] == ctx.guild.id)
        # Check member
        if member is None:
            member = ctx.author
        # Prepare embed
        time = datetime.datetime.now(get_localzone())
        created = member.created_at.strftime(guild_config["language"].dateTimeFormat)
        account_seniority = (datetime.datetime.utcnow() - member.created_at).days
        joined = member.joined_at.strftime(guild_config["language"].dateTimeFormat)
        guild_seniority = (datetime.datetime.utcnow() - member.joined_at).days
        user_score = next(scr for scr in self.bot.db.scores
                          if scr["guild_id"] == ctx.guild.id and scr["user_id"] == member.id)
        user_roles = ""
        for i in range(len(member.roles)-1, -1, -1):
            if member.roles[i].name == "@everyone":
                continue
            user_roles += f'{member.roles[i].mention} '
        embed = discord.Embed(colour=member.top_role.colour, timestamp=time,
                              title=member.display_name, description=user_roles)
        embed.set_thumbnail(url=member.avatar_url)
        embed.add_field(name=guild_config["language"].cmdUserInfo_created,
                        value=guild_config["language"].cmdUserInfo_day.format(created, account_seniority),
                        inline=False)
        embed.add_field(name=guild_config["language"].cmdUserInfo_joined,
                        value=guild_config["language"].cmdUserInfo_day.format(joined, guild_seniority),
                        inline=False)
        embed.add_field(name=guild_config["language"].cmdUserInfo_score,
                        value=f'{user_score["score"]}/20',
                        inline=False)
        # Send embed
        await ctx.send(embed=embed) | 
	async def user_info(self, ctx, *, member: discord.Member=None):
        """ Get detailed info for a nominated user.
        usage:: user [@mention|userid]
        details:: This command will get information on either a nominated user, or yourself.
        """
        # retrieve guild config
        guild_config = next(cfg for cfg in self.bot.db.configs if cfg["guild_id"] == ctx.guild.id)
        # Check member
        if member is None:
            member = ctx.author
        # Prepare embed
        time = datetime.datetime.now(get_localzone())
        created = member.created_at.strftime(guild_config["language"].dateTimeFormat)
        account_seniority = (datetime.datetime.utcnow() - member.created_at).days
        joined = member.joined_at.strftime(guild_config["language"].dateTimeFormat)
        guild_seniority = (datetime.datetime.utcnow() - member.joined_at).days
        user_score = next(scr for scr in self.bot.db.scores
                          if scr["guild_id"] == ctx.guild.id and scr["user_id"] == member.id)
        user_roles = ""
        for i in range(len(member.roles)-1, -1, -1):
            if member.roles[i].name == "@everyone":
                continue
            user_roles += f'{member.roles[i].mention} '
        embed = discord.Embed(colour=member.top_role.colour, timestamp=time,
                              title=member.display_name, description=user_roles)
        embed.set_thumbnail(url=member.avatar_url)
        embed.add_field(name=guild_config["language"].cmdUserInfo_created,
                        value=guild_config["language"].cmdUserInfo_day.format(created, account_seniority),
                        inline=False)
        embed.add_field(name=guild_config["language"].cmdUserInfo_joined,
                        value=guild_config["language"].cmdUserInfo_day.format(joined, guild_seniority),
                        inline=False)
        embed.add_field(name=guild_config["language"].cmdUserInfo_score,
                        value=f'{user_score["score"]}/20',
                        inline=False)
        # Send embed
        await ctx.send(embed=embed) | 
| 
	Python | 
	async def faq_command(self, ctx, *, query: str=""):
        """
        Shows the list of available FAQ tags.
        """
        query = query.lower()
        if not query:
            faqstr = self.tags()
            faqstr.sort()
            em = discord.Embed(title="List of FAQ tags",
                               description=', '.join(faqstr).title(),
                               colour=0xDFDE6E)
        elif self.search_faq(ctx.guild.id, query):
            em = self.embed_faq(ctx, query)
        else:
            close_items = []
            for item in self.tags():
                if fuzz.ratio(query, item) >= 75:
                    close_items.append(item.title())
            if len(close_items) > 0:
                if len(close_items) == 1:
                    em = self.embed_faq(ctx, close_items[0].lower(),
                                        title=f"Could not find \"{query.title()}\" "
                                              f"in FAQ tags. Did you mean \"{close_items[0]}\"?",
                                        color=0xFF8C00)
                else:
                    em = discord.Embed(title=f"Could not find \"{query.title()}\" in FAQ tags.",
                                       description=f"Did you mean {', '.join(close_items)}?",
                                       colour=0xFF8C00)
            else:
                em = discord.Embed(title="Error",
                                   description=f"Could not find \"{query.title()}\" "
                                               f"or any similarly named tags in FAQ tags.",
                                   colour=0xDC143C)
                em.set_footer(text=f"To see the list of all available FAQ tags, use {ctx.prefix}faq",
                              icon_url=f"https://cdn.discordapp.com/avatars/{self.bot.user.id}/"
                                       f"{self.bot.user.avatar}.png?size=64")
        await ctx.send(embed=em) | 
	async def faq_command(self, ctx, *, query: str=""):
        """
        Shows the list of available FAQ tags.
        """
        query = query.lower()
        if not query:
            faqstr = self.tags()
            faqstr.sort()
            em = discord.Embed(title="List of FAQ tags",
                               description=', '.join(faqstr).title(),
                               colour=0xDFDE6E)
        elif self.search_faq(ctx.guild.id, query):
            em = self.embed_faq(ctx, query)
        else:
            close_items = []
            for item in self.tags():
                if fuzz.ratio(query, item) >= 75:
                    close_items.append(item.title())
            if len(close_items) > 0:
                if len(close_items) == 1:
                    em = self.embed_faq(ctx, close_items[0].lower(),
                                        title=f"Could not find \"{query.title()}\" "
                                              f"in FAQ tags. Did you mean \"{close_items[0]}\"?",
                                        color=0xFF8C00)
                else:
                    em = discord.Embed(title=f"Could not find \"{query.title()}\" in FAQ tags.",
                                       description=f"Did you mean {', '.join(close_items)}?",
                                       colour=0xFF8C00)
            else:
                em = discord.Embed(title="Error",
                                   description=f"Could not find \"{query.title()}\" "
                                               f"or any similarly named tags in FAQ tags.",
                                   colour=0xDC143C)
                em.set_footer(text=f"To see the list of all available FAQ tags, use {ctx.prefix}faq",
                              icon_url=f"https://cdn.discordapp.com/avatars/{self.bot.user.id}/"
                                       f"{self.bot.user.avatar}.png?size=64")
        await ctx.send(embed=em) | 
| 
	Python | 
	async def faq_add(self, ctx, title: str, *, content: str = ""):
        """
        Add a new tag to the FAQ tags.
        Can add an image by either attaching it to the message, or using ~~ imageurl at the end.
        """
        updatebool = True
        title = title.lower()
        try:
            image_url = content.split('~~')[1].strip()
            content = content.split('~~')[0].strip()
        except IndexError:
            image_url = ""
        if len(title) > 256:
            em = discord.Embed(title="Error",
                               description="The title inputted is too long.\n"
                                           "The maximum title length is 256 characters.",
                               colour=0xDC143C)
            await ctx.send(embed=em)
            return
        if (not content and
                not ctx.message.attachments and
                not image_url):
            em = discord.Embed(title="Error",
                               description="Content is required to add an FAQ tag.",
                               colour=0xDC143C)
            # em.set_footer(text=self.bot.user.name, icon_url=f"https://cdn.discordapp.com/avatars/
            #               {self.bot.user.id}/{self.bot.user.avatar}.png?size=64")
            await ctx.send(embed=em)
            return
        else:
            currentfaq = {"guild_id": ctx.guild.id,
                          "tag": title,
                          "content": content,
                          "image": "",
                          "timestamp": datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC"),
                          "creator": str(ctx.message.author.id)}
            if image_url:
                if not await self.check_image(ctx, currentfaq, image_url):
                    updatebool = False
            elif ctx.message.attachments:
                attached_file = ctx.message.attachments[0]
                attached_file_name = attached_file.filename.lower()
                if not await self.check_image(ctx, currentfaq, attached_file_name, attached_file.url):
                    updatebool = False
        if updatebool:
            # check if tag existed
            for faq in self.bot.db.faqs:
                if title == faq["tag"] and ctx.guild.id == faq["guild_id"]:
                    currentfaq.update({"creator": faq["creator"]})
                    self.bot.db.edit_faq(currentfaq)
                    faq.update(currentfaq)
                    embed_title = f"Successfully edited \"{title.title()}\" in database"
                    break
            else:
                self.bot.db.save_faq(currentfaq)
                self.bot.db.faqs.append(currentfaq)
                embed_title = f"Successfully added \"{title.title()}\" to database"
            await ctx.send(embed=self.embed_faq(ctx, title, embed_title, 0x19B300)) | 
	async def faq_add(self, ctx, title: str, *, content: str = ""):
        """
        Add a new tag to the FAQ tags.
        Can add an image by either attaching it to the message, or using ~~ imageurl at the end.
        """
        updatebool = True
        title = title.lower()
        try:
            image_url = content.split('~~')[1].strip()
            content = content.split('~~')[0].strip()
        except IndexError:
            image_url = ""
        if len(title) > 256:
            em = discord.Embed(title="Error",
                               description="The title inputted is too long.\n"
                                           "The maximum title length is 256 characters.",
                               colour=0xDC143C)
            await ctx.send(embed=em)
            return
        if (not content and
                not ctx.message.attachments and
                not image_url):
            em = discord.Embed(title="Error",
                               description="Content is required to add an FAQ tag.",
                               colour=0xDC143C)
            # em.set_footer(text=self.bot.user.name, icon_url=f"https://cdn.discordapp.com/avatars/
            #               {self.bot.user.id}/{self.bot.user.avatar}.png?size=64")
            await ctx.send(embed=em)
            return
        else:
            currentfaq = {"guild_id": ctx.guild.id,
                          "tag": title,
                          "content": content,
                          "image": "",
                          "timestamp": datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC"),
                          "creator": str(ctx.message.author.id)}
            if image_url:
                if not await self.check_image(ctx, currentfaq, image_url):
                    updatebool = False
            elif ctx.message.attachments:
                attached_file = ctx.message.attachments[0]
                attached_file_name = attached_file.filename.lower()
                if not await self.check_image(ctx, currentfaq, attached_file_name, attached_file.url):
                    updatebool = False
        if updatebool:
            # check if tag existed
            for faq in self.bot.db.faqs:
                if title == faq["tag"] and ctx.guild.id == faq["guild_id"]:
                    currentfaq.update({"creator": faq["creator"]})
                    self.bot.db.edit_faq(currentfaq)
                    faq.update(currentfaq)
                    embed_title = f"Successfully edited \"{title.title()}\" in database"
                    break
            else:
                self.bot.db.save_faq(currentfaq)
                self.bot.db.faqs.append(currentfaq)
                embed_title = f"Successfully added \"{title.title()}\" to database"
            await ctx.send(embed=self.embed_faq(ctx, title, embed_title, 0x19B300)) | 
| 
	Python | 
	async def faq_remove(self, ctx, *, title: str):
        """
        Remove a tag from the FAQ tags.
        """
        title = title.lower()
        faquery = self.search_faq(ctx.guild.id, title)
        if faquery:
            em = self.embed_faq(ctx=ctx,
                                query=title,
                                title=f"Successfully removed \"{title.title()}\" from FAQ tags.",
                                color=0xDC143C)
            self.bot.db.faqs = [faq
                                for faq in self.bot.db.faqs
                                if ctx.guild.id != faq["guild_id"] or title != faq["tag"]]
            self.bot.db.delete_faq(ctx.guild.id, title)
        else:
            em = discord.Embed(title="Error",
                               description="Query not in FAQ tags.",
                               colour=0xDC143C)
        await ctx.send(embed=em) | 
	async def faq_remove(self, ctx, *, title: str):
        """
        Remove a tag from the FAQ tags.
        """
        title = title.lower()
        faquery = self.search_faq(ctx.guild.id, title)
        if faquery:
            em = self.embed_faq(ctx=ctx,
                                query=title,
                                title=f"Successfully removed \"{title.title()}\" from FAQ tags.",
                                color=0xDC143C)
            self.bot.db.faqs = [faq
                                for faq in self.bot.db.faqs
                                if ctx.guild.id != faq["guild_id"] or title != faq["tag"]]
            self.bot.db.delete_faq(ctx.guild.id, title)
        else:
            em = discord.Embed(title="Error",
                               description="Query not in FAQ tags.",
                               colour=0xDC143C)
        await ctx.send(embed=em) | 
| 
	Python | 
	async def wiki(self, ctx, *, searchterm):
        """
        Searches for a term in the [official Factorio wiki](https://wiki.factorio.com/).
        """
        em = discord.Embed(title=f"Searching for \"{searchterm.title()}\" in wiki.factorio.com...",
                           description="This shouldn't take long.",
                           colour=0xDFDE6E)
        # em.set_footer(text=self.bot.user.name,
        #                    icon_url=f"https://cdn.discordapp.com/avatars/{self.bot.user.id}/
        #                             {self.bot.user.avatar}.png?size=64")
        buffer_msg = await ctx.send(embed=em)
        async with ctx.channel.typing():
            async with aiohttp.ClientSession() as client:
                async with client.get(
                        f"https://wiki.factorio.com/index.php?search={searchterm.replace(' ', '%20')}") as resp:
                    r = await resp.text()
                    url = str(resp.url)
            soup = bs4.BeautifulSoup(r, 'html.parser')
            if soup.find('p', class_='mw-search-nonefound'):
                em = discord.Embed(title="Error",
                                   description=f"Could not find \"{searchterm.title()}\" in wiki.",
                                   colour=0xDC143C)
                # em.set_footer(text=self.bot.user.name,
                #               icon_url=f"https://cdn.discordapp.com/avatars/{self.bot.user.id}/
                #                         {self.bot.user.avatar}.png?size=64")
                await buffer_msg.edit(embed=em)
                return
            if soup.find_all('ul', class_="mw-search-results"):
                em = discord.Embed(title="Factorio Wiki",
                                   url=url,
                                   colour=0xDFDE6E)
                for item in soup.find_all('ul', class_="mw-search-results")[0].find_all("li"):
                    item = item.find_next('div', class_="mw-search-result-heading").find('a')
                    itemlink = item['href'] if not item['href'].endswith(")") else item['href'].replace(")", "\\)")
                    em.add_field(name=item['title'],
                                 value=f"[Read More](https://wiki.factorio.com{itemlink})",
                                 inline=True)
                # em.set_footer(text=self.bot.user.name,
                #               icon_url=f"https://cdn.discordapp.com/avatars/{self.bot.user.id}/
                #                        {self.bot.user.avatar}.png?size=64")
                await buffer_msg.edit(embed=em)
            else:
                description_ = ""
                if soup.select("#mw-content-text > p"):
                    p_num = 0
                    if re.search(r"((^<br/>$)|(This (article|page)))", str(soup.select("#mw-content-text > p")[0])):
                        p_num = 1
                    description_ = tomd.convert(str(soup.select("#mw-content-text > p")[p_num])) \
                        .strip().replace("<br/>", "\n")
                em = discord.Embed(title=soup.find("h1", id='firstHeading').get_text(),
                                   description=re.sub(r"\((\/\S*)\)", r"(https://wiki.factorio.com\1)", description_),
                                   url=url,
                                   colour=0x19B300)
                if soup.find('div', class_="factorio-icon"):
                    em.set_thumbnail(
                        url=f"https://wiki.factorio.com{soup.find('div', class_='factorio-icon').find('img')['src']}")
                # em.set_footer(text=self.bot.user.name,
                #               icon_url=f"https://cdn.discordapp.com/avatars/{self.bot.user.id}/
                #                        {self.bot.user.avatar}.png?size=64")
                await buffer_msg.edit(embed=em) | 
| 
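A quick illustration of the link-rewriting step inside wiki(): tomd emits relative markdown links such as (/Iron_ore), and the re.sub call makes them absolute wiki URLs. The page name below is only an example.

import re

# Example input resembling what tomd.convert() produces for a wiki paragraph.
snippet = "Produced from [Iron ore](/Iron_ore) in a furnace."
absolute = re.sub(r"\((\/\S*)\)", r"(https://wiki.factorio.com\1)", snippet)
print(absolute)  # Produced from [Iron ore](https://wiki.factorio.com/Iron_ore) in a furnace.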
	Python | 
	async def on_ready(self):
        """Event called when the bot is ready"""
        # Check if guilds were added while offline
        for guild in self.guilds:
            self.create_config_guild(guild)
        # Check if members were added while offline
        for guild in self.guilds:
            for member in guild.members:
                self.create_score(guild, member)
        try:
            for g in self.guilds:
                guild_config = next(cfg for cfg in self.db.configs if cfg["guild_id"] == g.id)
                bot_member = g.get_member(config.BOT_ID)
                if bot_member is None:
                    logger.warning(f'Cannot find FactorioBot in member list of guild {g.name}')
                else:
                    await bot_member.edit(nick=f'[{guild_config["prefix"]}] FactorioBot',
                                          reason="FactorioBot's prefix has changed")
        except Forbidden as forbidError:
            logger.warning(forbidError)
        # Load cogs
        self.remove_command("help")
        for cog in initial_cogs:
            try:
                self.load_extension(cog)
                logger.info(f'{cog} successfully loaded.')
            except Exception as e:
                logger.error(f'Failed to load extension {cog}.', exc_info=e) | 
| 
	Python | 
	async def on_guild_join(self, guild):
        """Event called when client join a guild or client create a new guild"""
        # Create new config
        self.create_config_guild(guild)
        # Add score to each member
        for member in guild.members:
            self.create_score(guild, member) | 
| 
	Python | 
	async def on_guild_remove(self, guild):
        """Event called when guild is removed from client"""
        # Remove score from each member
        self.db.scores = [scr for scr in self.db.scores if guild.id != scr["guild_id"]]
        for member in guild.members:
            self.db.delete_score(guild.id, member.id)
        # Remove config
        self.db.configs = [cfg for cfg in self.db.configs if guild.id != cfg["guild_id"]]
        self.db.delete_config(guild.id)
        # Remove faq
        self.db.faqs = [faq for faq in self.db.faqs if guild.id != faq["guild_id"]]
        self.db.delete_faq(guild.id) | 
| 
	Python | 
	async def on_member_remove(self, member):
        """Event called when a member leave a guild."""
        # Remove score
        self.db.scores = [scr for scr in self.db.scores
                          if member.guild.id != scr["guild_id"] and member.id == scr["user_id"]]
        self.db.delete_score(member.guild.id, member.id) | 
| 
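The filter keeps every score except the departing member's: an entry is dropped only when both the guild and the user match, which De Morgan's law turns into the or-of-negated-comparisons predicate above. A small worked example with made-up IDs:

# Made-up score entries; removing user 10 from guild 1 must keep the other two.
scores = [
    {"guild_id": 1, "user_id": 10},
    {"guild_id": 1, "user_id": 11},
    {"guild_id": 2, "user_id": 10},
]
kept = [scr for scr in scores if 1 != scr["guild_id"] or 10 != scr["user_id"]]
assert kept == [{"guild_id": 1, "user_id": 11}, {"guild_id": 2, "user_id": 10}]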
	Python | 
	def is_admin():
    """Checks if the author has administrator permission"""
    async def predicate(ctx):
        return await check_guild_permissions(ctx, {'administrator': True})
    return commands.check(predicate) | 
| 
	Python | 
	def is_owner():
    """Checks if the author is the bot's owner"""
    async def predicate(ctx):
        return await ctx.bot.is_owner(ctx.author)
    return commands.check(predicate) | 
| 
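Both helpers follow discord.py's check-factory pattern: the decorator returned by commands.check runs the async predicate before the command body. A minimal usage sketch; the bot instance and the command name are hypothetical, and @is_owner() can be swapped in to restrict a command to the bot owner instead.

# Assumes a `bot = commands.Bot(...)` instance exists elsewhere in the project.
@bot.command(name="resetfaq")
@is_admin()
async def reset_faq(ctx):
    # Only reached when check_guild_permissions() granted administrator.
    await ctx.send("FAQ reset.")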
	Python | 
	def api_call(self, endpoint, payload=None):
        """Low-level API call to newsapi.org."""
        url = self.api + endpoint
        payload = dict(payload) if payload else {}
        payload['apiKey'] = self.api_key
        try:
            resp = requests.get(url, params=payload)
        except requests.exceptions.RequestException as e:
            self.logger.error(e)
            return
        response = json.loads(resp.text)
        # on error
        if resp.status_code != 200:
            self.logger.error("{} {} {}".format(response['message'], response['status'], response['code']))
            if resp.status_code == 400:
                raise BadRequest(response['message'])
            elif resp.status_code == 401:
                raise UnauthorizedRequest(response['message'])
            elif resp.status_code == 429:
                raise ApiRateLimit(response['message'])
            elif resp.status_code == 500:
                raise ServerError(response['message'])
            else:
                # capture a generic error return code
                raise NewsApiError(response['message'])
        # on success
        return response | 
| 
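Because api_call() maps HTTP status codes onto a small exception hierarchy, callers can branch on the failure mode. A sketch of that pattern; the client construction and the endpoint are assumptions, only api_call() itself appears above.

client = NewsApi(api_key="YOUR_API_KEY")  # hypothetical client class
try:
    data = client.api_call('top-headlines', {'country': 'us'})
except ApiRateLimit:
    data = None  # rate limited: back off and retry later
except NewsApiError as err:
    raise SystemExit(f"news request failed: {err}")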
	Python | 
	def sources(self, category=None, language=None, country=None):
        """
        Provides a list of the news sources and blogs available on News API.
        You will need this to programmatically locate the identifier for the
        source you want articles from when querying the /articles endpoint.
        :param category: (optional) The category you would like to get sources for.
        :param language: (optional) The 2-letter ISO 639-1 code of the language.
        :param country: (optional) The 2-letter ISO 3166-1 code of the country.
        :return: a list of Source objects
        """
        data = self.api_call(endpoint='sources', payload={'category': category,
                                                          'language': language,
                                                          'country': country})
        return [Source(**s) for s in data['sources']] | 
| 
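A sketch of calling sources() and reading the returned Source objects; the client class name and the id/name attributes are assumptions based on the News API /sources response shape, since the Source class itself is not shown.

client = NewsApi(api_key="YOUR_API_KEY")  # hypothetical client class
for source in client.sources(category="technology", language="en"):
    # Source(**s) is assumed to expose the response fields as attributes.
    print(source.id, source.name)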
	Python | 
	def add_logging_group(argument_parser: ArgumentParser) -> None:
    """
    Add a group and arguments to control the log.
    Use with `support.log.configure_logging`.
    """
    log_group = argument_parser.add_argument_group(title="logging arguments")
    log_group.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="make the log output on the console more verbose",
    )
    log_group.add_argument(
        "--log-file",
        metavar="FILE",
        default=None,
        help="store all the logs into the specified file",
    )
    log_group.add_argument(
        "--loglevel",
        default=logging.WARNING,
        type=lambda level: getattr(logging, level.upper()),
        choices=[logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL],
        metavar="{DEBUG,INFO,WARN,ERROR,CRITICAL}",
        help="set the console log level",
    ) | 
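Wiring the group into a parser is straightforward; a minimal sketch, assuming support.log.configure_logging(args) later consumes the parsed namespace as the docstring suggests.

from argparse import ArgumentParser
import logging

parser = ArgumentParser(description="example tool")
add_logging_group(parser)
args = parser.parse_args(["-vv", "--log-file", "run.log", "--loglevel", "INFO"])
# args.verbose == 2, args.log_file == "run.log", args.loglevel == logging.INFO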